hw: rename Kernel::Processor to Kernel::Cpu

Kernel::Processor was a confusing remnant from the old scheme where we had a
Processor_driver (now Genode::Cpu) and a Processor (now Kernel::Cpu).
This commit also updates the in-code documentation and the variable and
function naming accordingly.

fix #1274
This commit is contained in:
Martin Stein 2014-10-10 16:13:52 +02:00 committed by Christian Helmuth
parent b3655902ed
commit b8ba3a7a22
48 changed files with 563 additions and 670 deletions

View File

@ -265,26 +265,21 @@ class Genode::Core_start_info
{
private:
unsigned _processor_id;
unsigned _cpu_id;
public:
/**
* Set-up valid core startup-message
*
* \param processor_id kernel name of the processor to start on
* Set-up valid core startup-message for starting on 'cpu'
*/
void init(unsigned const processor_id)
{
_processor_id = processor_id;
}
void init(unsigned const cpu) { _cpu_id = cpu; }
/***************
** Accessors **
***************/
unsigned processor_id() const { return _processor_id; }
unsigned cpu_id() const { return _cpu_id; }
};
class Genode::Native_utcb

View File

@ -54,7 +54,7 @@ SRC_CC += kernel/vm.cc
SRC_CC += kernel/signal_receiver.cc
SRC_CC += kernel/irq.cc
SRC_CC += kernel/pd.cc
SRC_CC += kernel/processor.cc
SRC_CC += kernel/cpu.cc
# add assembly sources
SRC_S += boot_modules.s

View File

@ -5,4 +5,4 @@
#
# configure multiprocessor mode
CC_OPT += -Wa,--defsym -Wa,PROCESSORS=$(PROCESSORS) -DPROCESSORS=$(PROCESSORS)
CC_OPT += -Wa,--defsym -Wa,NR_OF_CPUS=$(NR_OF_CPUS) -DNR_OF_CPUS=$(NR_OF_CPUS)

View File

@ -8,7 +8,7 @@
SPECS += hw platform_arndale
# configure multiprocessor mode
PROCESSORS = 2
NR_OF_CPUS = 2
# add repository relative paths
REP_INC_DIR += include/exynos5_uart

View File

@ -8,7 +8,7 @@
SPECS += hw platform_imx31 epit
# configure multiprocessor mode
PROCESSORS = 1
NR_OF_CPUS = 1
# set address where to link the text segment at
LD_TEXT_ADDR ?= 0x82000000

View File

@ -8,7 +8,7 @@
SPECS += hw platform_imx53 epit
# configure multiprocessor mode
PROCESSORS = 1
NR_OF_CPUS = 1
# set address where to link the text segment at
LD_TEXT_ADDR ?= 0x70010000

View File

@ -8,7 +8,7 @@
SPECS += hw platform_odroid_xu
# configure multiprocessor mode
PROCESSORS = 1
NR_OF_CPUS = 1
# add repository relative paths
REP_INC_DIR += include/exynos5_uart

View File

@ -8,7 +8,7 @@
SPECS += hw platform_panda
# configure multiprocessor mode
PROCESSORS = 1
NR_OF_CPUS = 1
# set address where to link the text segment at
LD_TEXT_ADDR ?= 0x81000000

View File

@ -8,7 +8,7 @@
SPECS += hw platform_pbxa9
# configure multiprocessor mode
PROCESSORS = 1
NR_OF_CPUS = 1
# set address where to link text segment at
LD_TEXT_ADDR ?= 0x70000000

View File

@ -8,7 +8,7 @@
SPECS += hw platform_rpi
# configure multiprocessor mode
PROCESSORS = 1
NR_OF_CPUS = 1
# set address where to link the text segment at
LD_TEXT_ADDR ?= 0x800000

View File

@ -8,7 +8,7 @@
SPECS += hw platform_vea9x4
# configure multiprocessor mode
PROCESSORS = 1
NR_OF_CPUS = 1
# set address where to link text segment at
LD_TEXT_ADDR ?= 0x01000000

View File

@ -26,10 +26,9 @@ namespace Genode
static void outer_cache_invalidate() { }
static void outer_cache_flush() { }
static void prepare_kernel() { }
static void secondary_processors_ip(void * const ip) { }
static void secondary_cpus_ip(void * const ip) { }
static bool is_smp() { return false; }
};
};
}
#endif /* _BOARD_H_ */

View File

@ -85,7 +85,7 @@ namespace Kernel
*
* Kernel and/or hardware may cache parts of a domain configuration. This
* function ensures that the in-memory state of the targeted domain gets
* processor locally effective.
* CPU-locally effective.
*/
inline void update_pd(unsigned const pd_id)
{
@ -137,7 +137,7 @@ namespace Kernel
* Start execution of a thread
*
* \param thread_id kernel name of the targeted thread
* \param cpu_id kernel name of the targeted processor
* \param cpu_id kernel name of the targeted CPU
* \param pd_id kernel name of the targeted domain
* \param utcb core local pointer to userland thread-context
*/
@ -287,7 +287,7 @@ namespace Kernel
* Create a virtual machine that is stopped initially
*
* \param dst memory donation for the VM object
* \param state location of the processor state of the VM
* \param state location of the CPU state of the VM
* \param signal_context_id kernel name of the signal context for VM events
*
* \retval >0 kernel name of the new VM

View File

@ -1,5 +1,5 @@
/*
* \brief A multiplexable common instruction processor
* \brief Class for kernel data that is needed to manage a specific CPU
* \author Martin Stein
* \author Stefan Kalkowski
* \date 2014-01-14
@ -12,8 +12,8 @@
* under the terms of the GNU General Public License version 2.
*/
#ifndef _KERNEL__PROCESSOR_H_
#define _KERNEL__PROCESSOR_H_
#ifndef _KERNEL__CPU_H_
#define _KERNEL__CPU_H_
/* core includes */
#include <timer.h>
@ -31,9 +31,9 @@ namespace Kernel
class Cpu_job;
/**
* Ability to do a domain update on all processors
* Ability to do a domain update on all CPUs
*/
class Processor_domain_update;
class Cpu_domain_update;
/**
* Execution context that is scheduled on CPU idle
@ -41,37 +41,37 @@ namespace Kernel
class Cpu_idle;
/**
* A multiplexable common instruction processor
* Class for kernel data that is needed to manage a specific CPU
*/
class Processor;
class Cpu;
/**
* Provides a processor object for every available processor
* Provides a CPU object for every available CPU
*/
class Processor_pool;
class Cpu_pool;
/**
* Return Processor_pool singleton
* Return singleton of CPU pool
*/
Processor_pool * processor_pool();
Cpu_pool * cpu_pool();
}
class Kernel::Processor_domain_update : public Double_list_item
class Kernel::Cpu_domain_update : public Double_list_item
{
friend class Processor_domain_update_list;
friend class Cpu_domain_update_list;
private:
bool _pending[PROCESSORS];
bool _pending[NR_OF_CPUS];
unsigned _domain_id;
/**
* Domain-update back-end
*/
void _domain_update() { Cpu::flush_tlb_by_pid(_domain_id); }
void _domain_update() { Genode::Cpu::flush_tlb_by_pid(_domain_id); }
/**
* Perform the domain update on the executing processors
* Perform the domain update on the executing CPU
*/
void _do();
@ -80,73 +80,63 @@ class Kernel::Processor_domain_update : public Double_list_item
/**
* Constructor
*/
Processor_domain_update()
Cpu_domain_update()
{
for (unsigned i = 0; i < PROCESSORS; i++) { _pending[i] = false; }
for (unsigned i = 0; i < NR_OF_CPUS; i++) { _pending[i] = false; }
}
/**
* Perform the domain update on all processors
*
* \param domain_id kernel name of targeted domain
*
* \return whether the update blocks and reports back on completion
* Do an update of domain 'id' on all CPUs and return if this blocks
*/
bool _do_global(unsigned const domain_id);
bool _do_global(unsigned const id);
/**
* Notice that the update isn't pending on any processor anymore
* Notice that the update isn't pending on any CPU anymore
*/
virtual void _processor_domain_update_unblocks() = 0;
virtual void _cpu_domain_update_unblocks() = 0;
};
class Kernel::Cpu_job : public Cpu_share
{
protected:
Processor * _cpu;
Cpu * _cpu;
Cpu_lazy_state _lazy_state;
/**
* Handle an interrupt exception that occurred during execution
*
* \param processor_id kernel name of targeted processor
* Handle interrupt exception that occurred during execution on CPU 'id'
*/
void _interrupt(unsigned const processor_id);
void _interrupt(unsigned const id);
/**
* Insert context into the processor scheduling
* Insert context into the scheduling of this CPU
*/
void _schedule();
/**
* Remove context from the processor scheduling
* Remove context from the scheduling of this CPU
*/
void _unschedule();
/**
* Yield currently scheduled processor share of the context
* Yield the currently scheduled CPU share of this context
*/
void _yield();
public:
/**
* Handle an exception that occurred during execution
*
* \param processor_id kernel name of targeted processor
* Handle exception that occurred during execution on CPU 'id'
*/
virtual void exception(unsigned const processor_id) = 0;
virtual void exception(unsigned const id) = 0;
/**
* Continue execution
*
* \param processor_id kernel name of targeted processor
* Continue execution on CPU 'id'
*/
virtual void proceed(unsigned const processor_id) = 0;
virtual void proceed(unsigned const id) = 0;
/**
* Construct a job with scheduling priority 'prio'
* Construct a job with scheduling priority 'p'
*/
Cpu_job(Cpu_priority const p) : Cpu_share(p, 0), _cpu(0) { }
@ -158,17 +148,17 @@ class Kernel::Cpu_job : public Cpu_share
/**
* Link job to CPU 'cpu'
*/
void affinity(Processor * const cpu);
void affinity(Cpu * const cpu);
/***************
** Accessors **
***************/
void cpu(Processor * const cpu) { _cpu = cpu; }
void cpu(Cpu * const cpu) { _cpu = cpu; }
Cpu_lazy_state * lazy_state() { return &_lazy_state; }
};
class Kernel::Cpu_idle : public Cpu::User_context, public Cpu_job
class Kernel::Cpu_idle : public Genode::Cpu::User_context, public Cpu_job
{
private:
@ -179,14 +169,14 @@ class Kernel::Cpu_idle : public Cpu::User_context, public Cpu_job
/**
* Main function of all idle threads
*/
static void _main() { while (1) { Cpu::wait_for_interrupt(); } }
static void _main() { while (1) { Genode::Cpu::wait_for_interrupt(); } }
public:
/**
* Construct idle context for CPU 'cpu'
*/
Cpu_idle(Processor * const cpu);
Cpu_idle(Cpu * const cpu);
/**
* Handle exception that occurred during execution on CPU 'cpu'
@ -206,7 +196,7 @@ class Kernel::Cpu_idle : public Cpu::User_context, public Cpu_job
void proceed(unsigned const cpu_id);
};
class Kernel::Processor : public Cpu
class Kernel::Cpu : public Genode::Cpu
{
private:
@ -227,7 +217,7 @@ class Kernel::Processor : public Cpu
/**
* Construct object for CPU 'id' with scheduling timer 'timer'
*/
Processor(unsigned const id, Timer * const timer)
Cpu(unsigned const id, Timer * const timer)
:
_id(id), _idle(this), _timer(timer),
_scheduler(&_idle, _quota(), _fill()),
@ -239,12 +229,12 @@ class Kernel::Processor : public Cpu
bool timer_irq(unsigned const i) { return _timer->interrupt_id(_id) == i; }
/**
* Notice that the inter-processor interrupt isn't pending anymore
* Notice that the IPI of the CPU isn't pending anymore
*/
void ip_interrupt_handled() { _ip_interrupt_pending = false; }
/**
* Raise the inter-processor interrupt of the processor
* Raise the IPI of the CPU
*/
void trigger_ip_interrupt();
@ -254,7 +244,7 @@ class Kernel::Processor : public Cpu
void schedule(Job * const job);
/**
* Handle exception of the processor and proceed its user execution
* Handle recent exception of the CPU and proceed its user execution
*/
void exception()
{
@ -291,39 +281,38 @@ class Kernel::Processor : public Cpu
Cpu_scheduler * scheduler() { return &_scheduler; }
};
class Kernel::Processor_pool
class Kernel::Cpu_pool
{
private:
Timer _timer;
char _processors[PROCESSORS][sizeof(Processor)];
char _cpus[NR_OF_CPUS][sizeof(Cpu)];
public:
/**
* Constructor
* Construct pool and thereby objects for all available CPUs
*/
Processor_pool()
Cpu_pool()
{
for (unsigned id = 0; id < PROCESSORS; id++) {
new (_processors[id]) Processor(id, &_timer); }
for (unsigned id = 0; id < NR_OF_CPUS; id++) {
new (_cpus[id]) Cpu(id, &_timer); }
}
/**
* Return the kernel object of processor 'id'
* Return object of CPU 'id'
*/
Processor * processor(unsigned const id) const
Cpu * cpu(unsigned const id) const
{
assert(id < PROCESSORS);
char * const p = const_cast<char *>(_processors[id]);
return reinterpret_cast<Processor *>(p);
assert(id < NR_OF_CPUS);
char * const p = const_cast<char *>(_cpus[id]);
return reinterpret_cast<Cpu *>(p);
}
/**
* Return the object of the primary processor
* Return object of primary CPU
*/
Processor * primary_processor() const {
return processor(Processor::primary_id()); }
Cpu * primary_cpu() const { return cpu(Cpu::primary_id()); }
};
#endif /* _KERNEL__PROCESSOR_H_ */
#endif /* _KERNEL__CPU_H_ */

View File

@ -23,7 +23,7 @@
#include <kernel/early_translations.h>
#include <kernel/configuration.h>
#include <kernel/object.h>
#include <kernel/processor.h>
#include <kernel/cpu.h>
#include <translation_table.h>
#include <assert.h>
@ -77,7 +77,7 @@ class Kernel::Lock
namespace Kernel
{
/**
* Processor context of the kernel
* CPU context of the kernel
*/
class Cpu_context;
@ -179,16 +179,16 @@ class Kernel::Mode_transition_control
/**
* Continue execution of client context
*
* \param context targeted client processor-context
* \param processor_id kernel name of targeted processor
* \param entry_raw raw pointer to assembly entry-code
* \param context targeted CPU context
* \param cpu kernel name of targeted CPU
* \param entry_raw raw pointer to assembly entry-code
*/
void _continue_client(void * const context, unsigned const processor_id,
void _continue_client(void * const context, unsigned const cpu,
addr_t const entry_raw)
{
/* override client-context pointer of the executing processor */
/* override client-context pointer of the executing CPU */
addr_t const context_ptr_base = (addr_t)&_mt_client_context_ptr;
size_t const context_ptr_offset = processor_id * sizeof(context);
size_t const context_ptr_offset = cpu * sizeof(context);
addr_t const context_ptr = context_ptr_base + context_ptr_offset;
*(void * *)context_ptr = context;
@ -206,7 +206,7 @@ class Kernel::Mode_transition_control
enum {
SIZE_LOG2 = Genode::Translation_table::MIN_PAGE_SIZE_LOG2,
SIZE = 1 << SIZE_LOG2,
VIRT_BASE = Processor::exception_entry,
VIRT_BASE = Cpu::exception_entry,
ALIGN_LOG2 = Genode::Translation_table::ALIGNM_LOG2,
ALIGN = 1 << ALIGN_LOG2,
};
@ -236,28 +236,16 @@ class Kernel::Mode_transition_control
}
/**
* Continue execution of userland context
*
* \param context targeted userland context
* \param processor_id kernel name of targeted processor
* Continue execution of 'user' at 'cpu'
*/
void continue_user(Processor::Context * const context,
unsigned const processor_id)
{
_continue_client(context, processor_id, _virt_user_entry());
}
void continue_user(Cpu::Context * const user, unsigned const cpu) {
_continue_client(user, cpu, _virt_user_entry()); }
/**
* Continue execution of virtual machine
*
* \param context targeted virtual-machine context
* \param processor_id kernel name of targeted processor
* Continue execution of 'vm' at 'cpu'
*/
void continue_vm(Cpu_state_modes * const context,
unsigned const processor_id)
{
_continue_client(context, processor_id, (addr_t)&_mt_vm_entry_pic);
}
void continue_vm(Cpu_state_modes * const vm, unsigned const cpu) {
_continue_client(vm, cpu, (addr_t)&_mt_vm_entry_pic); }
} __attribute__((aligned(Mode_transition_control::ALIGN)));
@ -290,7 +278,7 @@ class Kernel::Pd : public Object<Pd, MAX_PDS, Pd_ids, pd_ids, pd_pool>
/**
* Let the CPU context 'c' join the PD
*/
void admit(Processor::Context * const c)
void admit(Cpu::Context * const c)
{
c->protection_domain(id());
c->translation_table((addr_t)translation_table());

View File

@ -18,7 +18,7 @@
#include <kernel/configuration.h>
#include <kernel/signal_receiver.h>
#include <kernel/ipc_node.h>
#include <kernel/processor.h>
#include <kernel/cpu.h>
#include <kernel/thread_base.h>
namespace Kernel
@ -44,12 +44,8 @@ class Kernel::Thread
:
public Cpu::User_context,
public Object<Thread, MAX_THREADS, Thread_ids, thread_ids, thread_pool>,
public Cpu_job,
public Processor_domain_update,
public Ipc_node,
public Signal_context_killer,
public Signal_handler,
public Thread_base
public Cpu_job, public Cpu_domain_update, public Ipc_node,
public Signal_context_killer, public Signal_handler, public Thread_base
{
friend class Thread_event;
@ -262,11 +258,11 @@ class Kernel::Thread
void _await_request_failed();
/*****************************
** Processor_domain_update **
*****************************/
/***********************
** Cpu_domain_update **
***********************/
void _processor_domain_update_unblocks() { _resume(); }
void _cpu_domain_update_unblocks() { _resume(); }
public:
@ -281,21 +277,21 @@ class Kernel::Thread
/**
* Prepare thread to get scheduled the first time
*
* \param processor targeted processor
* \param pd targeted domain
* \param utcb core local pointer to userland thread-context
* \param start whether to start executing the thread
* \param cpu targeted CPU
* \param pd targeted domain
* \param utcb core local pointer to userland thread-context
* \param start whether to start executing the thread
*/
void init(Processor * const processor, Pd * const pd,
Native_utcb * const utcb, bool const start);
void init(Cpu * const cpu, Pd * const pd, Native_utcb * const utcb,
bool const start);
/*************
** Cpu_job **
*************/
void exception(unsigned const processor_id);
void proceed(unsigned const processor_id);
void exception(unsigned const cpu);
void proceed(unsigned const cpu);
/***************

View File

@ -61,7 +61,7 @@ class Kernel::Vm : public Object<Vm, MAX_VMS, Vm_ids, vm_ids, vm_pool>,
:
Cpu_job(Cpu_priority::min), _state((Vm_state * const)state),
_context(context)
{ Cpu_job::affinity(processor_pool()->primary_processor()); }
{ affinity(cpu_pool()->primary_cpu()); }
/****************
@ -69,7 +69,6 @@ class Kernel::Vm : public Object<Vm, MAX_VMS, Vm_ids, vm_ids, vm_pool>,
****************/
void run() { Cpu_job::_schedule(); }
void pause() { Cpu_job::_unschedule(); }
@ -77,25 +76,22 @@ class Kernel::Vm : public Object<Vm, MAX_VMS, Vm_ids, vm_ids, vm_pool>,
** Cpu_job **
*************/
void exception(unsigned const processor_id)
void exception(unsigned const cpu)
{
switch(_state->cpu_exception) {
case Genode::Cpu_state::INTERRUPT_REQUEST:
case Genode::Cpu_state::FAST_INTERRUPT_REQUEST:
_interrupt(processor_id);
_interrupt(cpu);
return;
case Genode::Cpu_state::DATA_ABORT:
_state->dfar = Processor::Dfar::read();
_state->dfar = Cpu::Dfar::read();
default:
Cpu_job::_unschedule();
_context->submit(1);
}
}
void proceed(unsigned const processor_id)
{
mtc()->continue_vm(_state, processor_id);
}
void proceed(unsigned const cpu) { mtc()->continue_vm(_state, cpu); }
};
#endif /* _KERNEL__VM_H_ */

View File

@ -36,58 +36,60 @@ namespace Genode {
*/
class Platform : public Platform_generic
{
typedef Core_mem_allocator::Phys_allocator Phys_allocator;
private:
Core_mem_allocator _core_mem_alloc; /* core-accessible memory */
Phys_allocator _io_mem_alloc; /* MMIO allocator */
Phys_allocator _irq_alloc; /* IRQ allocator */
Rom_fs _rom_fs; /* ROM file system */
typedef Core_mem_allocator::Phys_allocator Phys_allocator;
/*
* Virtual-memory range for non-core address spaces.
* The virtual memory layout of core is maintained in
* '_core_mem_alloc.virt_alloc()'.
*/
addr_t _vm_start;
size_t _vm_size;
Core_mem_allocator _core_mem_alloc; /* core-accessible memory */
Phys_allocator _io_mem_alloc; /* MMIO allocator */
Phys_allocator _irq_alloc; /* IRQ allocator */
Rom_fs _rom_fs; /* ROM file system */
/*
* Virtual-memory range for non-core address spaces.
* The virtual memory layout of core is maintained in
* '_core_mem_alloc.virt_alloc()'.
*/
addr_t _vm_start;
size_t _vm_size;
public:
/**
* Get one of the consecutively numbered available resource regions
*
* \return >0 region pointer if region with index 'i' exists
* 0 if region with index 'i' doesn't exist
*
* These functions should provide all resources that are available
* on the current platform.
*/
static Native_region * _ram_regions(unsigned i);
static Native_region * _mmio_regions(unsigned i);
/**
* Get one of the consecutively numbered available resource regions
*
* \return >0 region pointer if region with index 'i' exists
* 0 if region with index 'i' doesn't exist
*
* These functions should provide all resources that are available
* on the current platform.
*/
static Native_region * _ram_regions(unsigned i);
static Native_region * _mmio_regions(unsigned i);
/**
* Get one of the consecutively numbered core regions
*
* \return >0 Region pointer if region with index 'i' exists
* 0 If region with index 'i' doesn't exist
*
* Core regions are address regions that must be permitted to
* core only, such as the core image ROM. These regions are normally
* a subset of the resource regions provided above.
*/
static Native_region * _core_only_ram_regions(unsigned i);
static Native_region * _core_only_mmio_regions(unsigned i);
/**
* Get one of the consecutively numbered core regions
*
* \return >0 Region pointer if region with index 'i' exists
* 0 If region with index 'i' doesn't exist
*
* Core regions are address regions that must be permitted to
* core only, such as the core image ROM. These regions are
* normally a subset of the resource regions provided above.
*/
static Native_region * _core_only_ram_regions(unsigned i);
static Native_region * _core_only_mmio_regions(unsigned i);
/**
* Get one of the consecutively numbered user interrupts
*
* \param i index of interrupt
*
* \return >0 pointer to the name of the requested interrupt
* 0 no interrupt for that index
*/
static unsigned * _irq(unsigned const i);
/**
* Get one of the consecutively numbered user interrupts
*
* \param i index of interrupt
*
* \return >0 pointer to the name of the requested interrupt
* 0 no interrupt for that index
*/
static unsigned * _irq(unsigned const i);
/**
* Constructor
@ -120,17 +122,13 @@ namespace Genode {
inline Rom_fs *rom_fs() { return &_rom_fs; }
inline void wait_for_exit()
{
while (1) { Kernel::pause_current_thread(); }
};
inline void wait_for_exit() {
while (1) { Kernel::pause_current_thread(); } };
bool supports_direct_unmap() const { return 1; }
Affinity::Space affinity_space() const
{
return Affinity::Space(PROCESSORS);
}
Affinity::Space affinity_space() const {
return Affinity::Space(NR_OF_CPUS); }
};
}

View File

@ -19,10 +19,10 @@
*/
.macro _init_kernel_sp base_reg, size_reg
/* get kernel name of processor */
_get_processor_id sp
/* get kernel name of CPU */
_get_cpu_id sp
/* calculate top of the kernel-stack of this processor and apply as SP */
/* calculate top of the kernel-stack of this CPU and apply as SP */
add sp, #1
mul \size_reg, \size_reg, sp
add sp, \base_reg, \size_reg

View File

@ -32,7 +32,7 @@
.set IRQ_PC_ADJUST, 4
.set FIQ_PC_ADJUST, 4
/* offsets of the member variables in a processor context */
/* offsets of the member variables in a CPU context */
.set R12_OFFSET, 12 * 4
.set SP_OFFSET, 13 * 4
.set LR_OFFSET, 14 * 4
@ -61,19 +61,19 @@
.global _mt_master_context_end
_mt_master_context_end:
/* space for a client context-pointer per processor */
/* space for a client context-pointer per CPU */
.p2align 2
.global _mt_client_context_ptr
_mt_client_context_ptr:
.rept PROCESSORS
.rept NR_OF_CPUS
.space CONTEXT_PTR_SIZE
.endr
/* a globally mapped buffer per processor */
/* a globally mapped buffer per CPU */
.p2align 2
.global _mt_buffer
_mt_buffer:
.rept PROCESSORS
.rept NR_OF_CPUS
.space BUFFER_SIZE
.endr

View File

@ -432,7 +432,7 @@ class Genode::Translation_table
pt_phys = pt_phys ? pt_phys : pt; /* hack for core */
_entries[i] = Page_table_descriptor::create(pt_phys);
/* some processors need to act on changed translations */
/* some CPUs need to act on changed translations */
size_t const dsize = sizeof(Descriptor::access_t);
Cpu::translation_added((addr_t)&_entries[i], dsize);
}

View File

@ -78,7 +78,7 @@ class Genode::Arm_gic_distributor : public Mmio
struct Priority : Bitfield<0, 8> { }; };
/**
* Interrupt processor target registers
* Interrupt CPU-target registers
*/
struct Itargetsr : Register_array<0x800, 32, nr_of_irq, 8> {
struct Cpu_targets : Bitfield<0, 8> { }; };
@ -214,7 +214,7 @@ class Genode::Arm_gic
/**
* Initialize CPU local interface of the controller
*/
void init_processor_local()
void init_cpu_local()
{
/* disable the priority filter */
_cpui.write<Cpui::Pmr::Priority>(_distr.min_priority());

View File

@ -33,11 +33,7 @@ namespace Genode
class Cpu;
}
namespace Kernel
{
using Genode::Cpu_lazy_state;
using Genode::Cpu;
}
namespace Kernel { using Genode::Cpu_lazy_state; }
class Genode::Cpu : public Arm
{
@ -141,7 +137,7 @@ class Genode::Cpu : public Arm
*/
static void tlb_insertions() { flush_tlb(); }
static void start_secondary_processors(void *) { assert(!Board::is_smp()); }
static void start_secondary_cpus(void *) { assert(!Board::is_smp()); }
/**
* Return whether to retry an undefined user instruction after this call
@ -157,7 +153,7 @@ class Genode::Cpu : public Arm
static void translation_added(addr_t const addr, size_t const size)
{
/*
* The Cortex A8 processor can't use the L1 cache on page-table
* The Cortex-A8 CPU can't use the L1 cache on page-table
* walks. Therefore, as the page-tables lie in write-back cacheable
* memory we've to clean the corresponding cache-lines even when a
* page table entry is added. We only do this as core as the kernel
@ -167,12 +163,12 @@ class Genode::Cpu : public Arm
}
/**
* Return kernel name of the executing processor
* Return kernel name of the executing CPU
*/
static unsigned executing_id();
/**
* Return kernel name of the primary processor
* Return kernel name of the primary CPU
*/
static unsigned primary_id();

View File

@ -15,12 +15,10 @@
.include "spec/arm/macros_support.s"
/**
* Determine the kernel name of the executing processor
*
* \param target_reg register that shall receive the processor name
* Load kernel name of the executing CPU into register 'r'
*/
.macro _get_processor_id target_reg
.macro _get_cpu_id r
/* no multiprocessing supported for ARMv6 */
mov \target_reg, #0
mov \r, #0
.endm

View File

@ -292,12 +292,12 @@ class Genode::Arm_v7 : public Arm
static void data_synchronization_barrier() { asm volatile ("dsb"); }
/**
* Enable secondary processors with instr. pointer 'ip'
* Enable secondary CPUs with instr. pointer 'ip'
*/
static void start_secondary_processors(void * const ip)
static void start_secondary_cpus(void * const ip)
{
if (!(PROCESSORS > 1)) { return; }
Board::secondary_processors_ip(ip);
if (!(NR_OF_CPUS > 1)) { return; }
Board::secondary_cpus_ip(ip);
data_synchronization_barrier();
asm volatile ("sev\n");
}

View File

@ -15,15 +15,13 @@
.include "spec/arm/macros_support.s"
/**
* Determine the kernel name of the executing processor
*
* \param target_reg register that shall receive the processor name
* Load kernel name of the executing CPU into register 'r'
*/
.macro _get_processor_id target_reg
.macro _get_cpu_id r
/* read the multiprocessor affinity register */
mrc p15, 0, \target_reg, c0, c0, 5
mrc p15, 0, \r, c0, c0, 5
/* get the affinity-0 bitfield from the read register value */
and \target_reg, \target_reg, #0xff
and \r, \r, #0xff
.endm

View File

@ -28,14 +28,10 @@ namespace Genode
static void prepare_kernel() { }
/**
* Tell secondary processors where to start execution from
*
* \param ip initial instruction pointer of secondary processors
* Tell secondary CPUs to start execution from instr. pointer 'ip'
*/
static void secondary_processors_ip(void * const ip)
{
*(void * volatile *)IRAM_BASE = ip;
}
static void secondary_cpus_ip(void * const ip) {
*(void * volatile *)IRAM_BASE = ip; }
static bool is_smp() { return true; }
};

View File

@ -30,11 +30,7 @@ namespace Genode
class Cpu;
}
namespace Kernel
{
using Genode::Cpu_lazy_state;
using Genode::Cpu;
}
namespace Kernel { using Genode::Cpu_lazy_state; }
class Genode::Cpu : public Arm_v7
{
@ -46,12 +42,12 @@ class Genode::Cpu : public Arm_v7
bool retry_undefined_instr(Cpu_lazy_state *) { return false; }
/**
* Return kernel name of the executing processor
* Return kernel name of the executing CPU
*/
static unsigned executing_id();
/**
* Return kernel name of the primary processor
* Return kernel name of the primary CPU
*/
static unsigned primary_id();

View File

@ -30,11 +30,7 @@ namespace Genode
class Cpu;
}
namespace Kernel
{
using Genode::Cpu_lazy_state;
using Genode::Cpu;
}
namespace Kernel { using Genode::Cpu_lazy_state; }
class Genode::Cpu : public Arm_v7
{
@ -59,7 +55,7 @@ class Genode::Cpu : public Arm_v7
static void translation_added(addr_t const addr, size_t const size)
{
/*
* The Cortex A8 processor can't use the L1 cache on page-table
* The Cortex-A8 CPU can't use the L1 cache on page-table
* walks. Therefore, as the page-tables lie in write-back cacheable
* memory we've to clean the corresponding cache-lines even when a
* page table entry is added. We only do this as core as the kernel
@ -69,12 +65,12 @@ class Genode::Cpu : public Arm_v7
}
/**
* Return kernel name of the executing processor
* Return kernel name of the executing CPU
*/
static unsigned executing_id();
/**
* Return kernel name of the primary processor
* Return kernel name of the primary CPU
*/
static unsigned primary_id();

View File

@ -31,11 +31,7 @@ namespace Genode
class Cpu;
}
namespace Kernel
{
using Genode::Cpu_lazy_state;
using Genode::Cpu;
}
namespace Kernel { using Genode::Cpu_lazy_state; }
class Genode::Cpu_lazy_state
{
@ -167,9 +163,7 @@ class Genode::Cpu : public Arm_v7
}
/**
* Save state of the advanced FP/SIMD extension to memory
*
* \param state processor state to save FP/SIMD state into
* Save state of the advanced FP/SIMD extension into 'state'
*/
static void _save_advanced_fp_simd_state(Cpu_lazy_state * const state)
{
@ -189,9 +183,7 @@ class Genode::Cpu : public Arm_v7
}
/**
* Load state of the advanced FP/SIMD extension from memory
*
* \param state processor state to load FP/SIMD state out of
* Load state of the advanced FP/SIMD extension from 'state'
*/
static void _load_advanced_fp_simd_state(Cpu_lazy_state * const state)
{
@ -259,8 +251,8 @@ class Genode::Cpu : public Arm_v7
/**
* Prepare for the proceeding of a user
*
* \param old_state processor state of the last user
* \param new_state processor state of the next user
* \param old_state CPU state of the last user
* \param new_state CPU state of the next user
*/
static void prepare_proceeding(Cpu_lazy_state * const old_state,
Cpu_lazy_state * const new_state)
@ -272,7 +264,7 @@ class Genode::Cpu : public Arm_v7
/**
* Return whether to retry an undefined user instruction after this call
*
* \param state processor state of the user
* \param state CPU state of the user
*/
bool retry_undefined_instr(Cpu_lazy_state * const state)
{
@ -289,12 +281,12 @@ class Genode::Cpu : public Arm_v7
}
/**
* Return kernel name of the executing processor
* Return kernel name of the executing CPU
*/
static unsigned executing_id();
/**
* Return kernel name of the primary processor
* Return kernel name of the primary CPU
*/
static unsigned primary_id();

View File

@ -188,13 +188,11 @@ class Genode::Timer : public Mmio
/**
* Return kernel name of timer interrupt of a specific processor
*
* \param processor_id kernel name of targeted processor
* Return kernel name of the interrupt of the timer of CPU 'cpu'
*/
static unsigned interrupt_id(unsigned const processor_id)
static unsigned interrupt_id(unsigned const cpu)
{
switch (processor_id) {
switch (cpu) {
case 0: return Board::MCT_IRQ_L0;
case 1: return Board::MCT_IRQ_L1;
default: return 0;
@ -218,15 +216,11 @@ class Genode::Timer : public Mmio
}
/**
* Start single timeout run
*
* \param tics delay of timer interrupt
* \param processor_id kernel name of processor of targeted timer
* Raise interrupt of CPU 'cpu' once after timeout 'tics'
*/
inline void start_one_shot(unsigned const tics,
unsigned const processor_id)
inline void start_one_shot(unsigned const tics, unsigned const cpu)
{
switch (processor_id) {
switch (cpu) {
case 0:
write<L0_int_cstat::Frcnt>(1);
_run_0(0);
@ -248,9 +242,9 @@ class Genode::Timer : public Mmio
*/
unsigned ms_to_tics(unsigned const ms) { return ms * _tics_per_ms; }
unsigned value(unsigned const processor_id)
unsigned value(unsigned const cpu)
{
switch (processor_id) {
switch (cpu) {
case 0: return read<L0_frcnto>();
case 1: return read<L1_frcnto>();
default: return 0;

View File

@ -105,7 +105,7 @@ class Genode::Pic : public Mmio
** Dummies **
*************/
void init_processor_local() { }
void init_cpu_local() { }
void trigger_ip_interrupt(unsigned) { }
void finish_request() { /* done by source retraction or masking */ }
};

View File

@ -95,14 +95,10 @@ namespace Imx53
aips_2()->prepare_kernel();
}
static bool is_smp() { return false; }
static void outer_cache_invalidate() { }
static void outer_cache_flush() { }
/**
* Tell secondary processors where to start execution from
*/
static void secondary_processors_ip(void *) { }
static bool is_smp() { return false; }
static void secondary_cpus_ip(void *) { }
};
}

View File

@ -147,7 +147,7 @@ class Genode::Pic : public Mmio
if (valid(i)) { write<Enclear::Clear_enable>(1, i); } }
/**
* Wether an interrupt is inter-processor interrupt of a processor
* Whether an interrupt is an inter-processor interrupt of a CPU
*/
bool is_ip_interrupt(unsigned, unsigned) { return false; }
@ -156,7 +156,7 @@ class Genode::Pic : public Mmio
*************/
void trigger_ip_interrupt(unsigned) { }
void init_processor_local() { }
void init_cpu_local() { }
void finish_request() { }
};

View File

@ -111,8 +111,7 @@ namespace Genode
static void outer_cache_invalidate();
static void outer_cache_flush();
static void prepare_kernel();
static void secondary_processors_ip(void * const ip) { }
static void secondary_cpus_ip(void * const ip) { }
static bool is_smp() { return true; }
};
}

View File

@ -171,7 +171,7 @@ class Genode::Pic : Mmio
*/
Pic() : Mmio(Board::IRQ_CONTROLLER_BASE) { mask(); }
void init_processor_local() { }
void init_cpu_local() { }
bool take_request(unsigned &irq)
{
@ -235,24 +235,12 @@ class Genode::Pic : Mmio
write<Irq_disable_gpu_2>(1 << (i - 8 - 32));
}
/**
* Wether an interrupt is inter-processor interrupt of a processor
*
* \param interrupt_id kernel name of the interrupt
* \param processor_id kernel name of the processor
/*
* Dummies
*/
bool is_ip_interrupt(unsigned const interrupt_id,
unsigned const processor_id)
{
return false;
}
/**
* Trigger the inter-processor interrupt of a processor
*
* \param processor_id kernel name of the processor
*/
void trigger_ip_interrupt(unsigned const processor_id) { }
bool is_ip_interrupt(unsigned, unsigned) { return false; }
void trigger_ip_interrupt(unsigned) { }
};
namespace Kernel { class Pic : public Genode::Pic { }; }

View File

@ -0,0 +1,194 @@
/*
* \brief Class for kernel data that is needed to manage a specific CPU
* \author Martin Stein
* \author Stefan Kalkowski
* \date 2014-01-14
*/
/*
* Copyright (C) 2014 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/* core includes */
#include <kernel/cpu.h>
#include <kernel/kernel.h>
#include <kernel/thread.h>
#include <kernel/irq.h>
#include <pic.h>
#include <timer.h>
/* base includes */
#include <unmanaged_singleton.h>
using namespace Kernel;
namespace Kernel
{
/**
 * Lists all pending domain updates
 */
class Cpu_domain_update_list;

/* accessors to kernel-global singleton objects, defined elsewhere */
Pic * pic();
Timer * timer();

/**
 * Return singleton pool of all CPU objects, lazily constructed on first use
 */
Cpu_pool * cpu_pool() { return unmanaged_singleton<Cpu_pool>(); }
}
/**
 * List of domain updates that are still pending on some CPUs
 */
class Kernel::Cpu_domain_update_list
: public Double_list_typed<Cpu_domain_update>
{
/* shorthand for the item type of this list */
typedef Cpu_domain_update Update;
public:
/**
 * Perform all pending domain updates on the executing CPU
 */
void do_each() { for_each([] (Update * const u) { u->_do(); }); }
};
namespace Kernel
{
/**
 * Return singleton of the CPU domain-update list
 */
Cpu_domain_update_list * cpu_domain_update_list() {
return unmanaged_singleton<Cpu_domain_update_list>(); }
}
/*************
** Cpu_job **
*************/
/* on destruction, remove the job from the scheduler of its CPU (if any) */
Cpu_job::~Cpu_job() { if (_cpu) { _cpu->scheduler()->remove(this); } }
/* announce the job as ready to the CPU it is assigned to */
void Cpu_job::_schedule() { _cpu->schedule(this); }
/**
 * Mark the job unready at the scheduler of its CPU
 */
void Cpu_job::_unschedule()
{
/* only the CPU that owns the job may unschedule it */
assert(_cpu->id() == Cpu::executing_id());
_cpu->scheduler()->unready(this);
}
/**
 * Give up the remaining scheduling share of the job
 */
void Cpu_job::_yield()
{
/* only the CPU that owns the job may yield on its behalf */
assert(_cpu->id() == Cpu::executing_id());
_cpu->scheduler()->yield();
}
/**
 * Handle an interrupt request that occured on CPU 'cpu_id'
 */
void Cpu_job::_interrupt(unsigned const cpu_id)
{
/* determine handling for specific interrupt */
unsigned irq_id;
Pic * const ic = pic();
if (ic->take_request(irq_id)) {
/* check whether the interrupt is a CPU-scheduling timeout */
if (!_cpu->timer_irq(irq_id)) {
/* check whether the interrupt is our IPI */
if (ic->is_ip_interrupt(irq_id, cpu_id)) {
/* the IPI signals pending domain updates for this CPU */
cpu_domain_update_list()->do_each();
_cpu->ip_interrupt_handled();
/* try to inform the user interrupt-handler */
} else { Irq::occurred(irq_id); }
}
}
/* end interrupt request at controller */
ic->finish_request();
}
/**
 * Assign the job to CPU 'cpu' and insert it into that CPU's scheduler
 */
void Cpu_job::affinity(Cpu * const cpu)
{
_cpu = cpu;
_cpu->scheduler()->insert(this);
}
/**************
** Cpu_idle **
**************/
/* the idle job runs with the lowest scheduling priority on its CPU */
Cpu_idle::Cpu_idle(Cpu * const cpu) : Cpu_job(Cpu_priority::min)
{
Cpu_job::cpu(cpu);
/* start from a reset-like exception state, entering at '_main' */
cpu_exception = RESET;
ip = (addr_t)&_main;
/* stack grows downwards, so point sp at the top of the stack array */
sp = (addr_t)&_stack[stack_size];
/* execute within the address space and PD of core */
init_thread((addr_t)core_pd()->translation_table(), core_pd()->id());
}
/* switch to this context on CPU 'cpu' via the mode-transition code */
void Cpu_idle::proceed(unsigned const cpu) { mtc()->continue_user(this, cpu); }
/*********
** Cpu **
*********/
/**
 * Make 'job' ready at this CPU's scheduler
 */
void Cpu::schedule(Job * const job)
{
/* on the local CPU, ready the job directly */
if (_id == executing_id()) { _scheduler.ready(job); }
/*
 * For a remote CPU, 'ready_check' presumably reports whether the remote
 * CPU must re-schedule (TODO confirm) - if so, notify it via an IPI.
 */
else if (_scheduler.ready_check(job)) { trigger_ip_interrupt(); }
}
/**
 * Raise the inter-processor interrupt of this CPU
 */
void Cpu::trigger_ip_interrupt()
{
/* avoid raising redundant IPIs while one is still being handled */
if (!_ip_interrupt_pending) {
pic()->trigger_ip_interrupt(_id);
_ip_interrupt_pending = true;
}
}
/***********************
** Cpu_domain_update **
***********************/
/**
 * Apply the domain update on the executing CPU and finish the update
 * as soon as no CPU has it pending anymore
 */
void Cpu_domain_update::_do()
{
/* perform domain update locally and get pending bit */
unsigned const id = Cpu::executing_id();
if (!_pending[id]) { return; }
_domain_update();
_pending[id] = false;
/* check whether there are still CPUs pending */
unsigned i = 0;
for (; i < NR_OF_CPUS && !_pending[i]; i++) { }
if (i < NR_OF_CPUS) { return; }
/* as no CPU is pending anymore, end the domain update */
cpu_domain_update_list()->remove(this);
_cpu_domain_update_unblocks();
}
/**
 * Apply a domain update for domain 'domain_id' on all CPUs
 *
 * \return  whether the caller must block until remote CPUs are done
 */
bool Cpu_domain_update::_do_global(unsigned const domain_id)
{
/* perform locally and leave it at that if in uniprocessor mode */
_domain_id = domain_id;
_domain_update();
if (NR_OF_CPUS == 1) { return false; }
/* inform other CPUs and block until they are done */
cpu_domain_update_list()->insert_tail(this);
unsigned const cpu_id = Cpu::executing_id();
for (unsigned i = 0; i < NR_OF_CPUS; i++) {
/* the executing CPU already performed its update above */
if (i == cpu_id) { continue; }
_pending[i] = true;
cpu_pool()->cpu(i)->trigger_ip_interrupt();
}
return true;
}

View File

@ -12,7 +12,7 @@
*/
/* core includes */
#include <kernel/processor.h>
#include <kernel/cpu.h>
#include <kernel/irq.h>
#include <pic.h>
@ -22,4 +22,4 @@ namespace Kernel { Pic * pic(); }
void Irq::_disable() const { pic()->mask(_id()); }
void Irq::_enable() const { pic()->unmask(_id(), Processor::executing_id()); }
void Irq::_enable() const { pic()->unmask(_id(), Cpu::executing_id()); }

View File

@ -43,7 +43,7 @@ using namespace Kernel;
extern Genode::Native_thread_id _main_thread_id;
extern "C" void CORE_MAIN();
extern void * _start_secondary_processors;
extern void * _start_secondary_cpus;
extern int _prog_img_beg;
extern int _prog_img_end;
@ -157,17 +157,14 @@ namespace Kernel
}
/**
* Return wether an interrupt is private to the kernel
*
* \param interrupt_id kernel name of the targeted interrupt
* Return whether interrupt 'irq' is private to the kernel
*/
bool private_interrupt(unsigned const interrupt_id)
bool private_interrupt(unsigned const irq)
{
bool ret = 0;
for (unsigned i = 0; i < PROCESSORS; i++) {
ret |= interrupt_id == Timer::interrupt_id(i);
for (unsigned i = 0; i < NR_OF_CPUS; i++) {
if (irq == Timer::interrupt_id(i)) { return 1; }
}
return ret;
return 0;
}
}
@ -207,67 +204,106 @@ namespace Kernel
/**
* Enable kernel-entry assembly to get an exclusive stack at every processor
* Enable kernel-entry assembly to get an exclusive stack for every CPU
*/
unsigned kernel_stack_size = Kernel::STACK_SIZE;
char kernel_stack[PROCESSORS][Kernel::STACK_SIZE]
char kernel_stack[NR_OF_CPUS][Kernel::STACK_SIZE]
__attribute__((aligned(16)));
/**
* Setup kernel enviroment before activating secondary processors
* Setup kernel environment before activating secondary CPUs
*/
extern "C" void init_kernel_uniprocessor()
extern "C" void init_kernel_up()
{
/************************************************************************
** As atomic operations are broken in physical mode on some platforms **
** we must avoid the use of 'cmpxchg' by now (includes not using any **
** local static objects. **
************************************************************************/
/*
* As atomic operations are broken in physical mode on some platforms
* we must avoid the use of 'cmpxchg' by now (includes not using any
* local static objects.
*/
/* calculate in advance as needed later when data writes aren't allowed */
core_tt_base = (addr_t) core_pd()->translation_table();
core_pd_id = core_pd()->id();
/* initialize all processor objects */
processor_pool();
/* initialize all CPU objects */
cpu_pool();
/* go multiprocessor mode */
Processor::start_secondary_processors(&_start_secondary_processors);
Cpu::start_secondary_cpus(&_start_secondary_cpus);
}
/**
* Setup kernel enviroment after activating secondary processors
* Setup kernel environment after activating secondary CPUs as primary CPU
*/
extern "C" void init_kernel_multiprocessor()
void init_kernel_mp_primary()
{
/***********************************************************************
** As updates on a cached kernel lock might not be visible to **
** processors that have not enabled caches, we can't synchronize the **
** activation of MMU and caches. Hence we must avoid write access to **
** kernel data by now. **
***********************************************************************/
/* get stack memory that fulfills the constraints for core stacks */
enum {
STACK_ALIGNM = 1 << Genode::CORE_STACK_ALIGNM_LOG2,
STACK_SIZE = DEFAULT_STACK_SIZE,
};
static_assert(STACK_SIZE <= STACK_ALIGNM - sizeof(Core_thread_id),
"stack size does not fit stack alignment of core");
static char s[STACK_SIZE] __attribute__((aligned(STACK_ALIGNM)));
/* synchronize data view of all processors */
Processor::invalidate_data_caches();
Processor::invalidate_instr_caches();
Processor::data_synchronization_barrier();
/* provide thread ident at the aligned base of the stack */
*(Core_thread_id *)s = 0;
/* initialize processor in physical mode */
Processor::init_phys_kernel();
/* start thread with stack pointer at the top of stack */
static Native_utcb utcb;
static Thread t(Cpu_priority::max, "core");
_main_thread_id = t.id();
_main_thread_utcb = &utcb;
_main_thread_utcb->start_info()->init(t.id(), Genode::Native_capability());
t.ip = (addr_t)CORE_MAIN;;
t.sp = (addr_t)s + STACK_SIZE;
t.init(cpu_pool()->primary_cpu(), core_pd(), &utcb, 1);
/* initialize interrupt objects */
static Genode::uint8_t _irqs[Pic::NR_OF_IRQ * sizeof(Irq)];
for (unsigned i = 0; i < Pic::NR_OF_IRQ; i++) {
if (private_interrupt(i)) { continue; }
new (&_irqs[i * sizeof(Irq)]) Irq(i);
}
/* kernel initialization finished */
Genode::printf("kernel initialized\n");
test();
}
/**
* Setup kernel environment after activating secondary CPUs
*/
extern "C" void init_kernel_mp()
{
/*
* As updates on a cached kernel lock might not be visible to CPUs that
* have not enabled caches, we can't synchronize the activation of MMU and
* caches. Hence we must avoid write access to kernel data by now.
*/
/* synchronize data view of all CPUs */
Cpu::invalidate_data_caches();
Cpu::invalidate_instr_caches();
Cpu::data_synchronization_barrier();
/* initialize CPU in physical mode */
Cpu::init_phys_kernel();
/* switch to core address space */
Processor::init_virt_kernel(core_tt_base, core_pd_id);
Cpu::init_virt_kernel(core_tt_base, core_pd_id);
/************************************
** Now it's safe to use 'cmpxchg' **
************************************/
/*
* Now it's safe to use 'cmpxchg'
*/
Lock::Guard guard(data_lock());
/*******************************************
** Now it's save to write to kernel data **
*******************************************/
/*
* Now it's safe to write to kernel data
*/
/*
* TrustZone initialization code
@ -279,51 +315,18 @@ extern "C" void init_kernel_multiprocessor()
/*
* Enable performance counter
*
* FIXME This is an optional processor specific feature
* FIXME This is an optional CPU specific feature
*/
perf_counter()->enable();
/* locally initialize interrupt controller */
unsigned const processor_id = Processor::executing_id();
Processor * const processor = processor_pool()->processor(processor_id);
pic()->init_processor_local();
pic()->unmask(Timer::interrupt_id(processor_id), processor_id);
unsigned const cpu = Cpu::executing_id();
pic()->init_cpu_local();
pic()->unmask(Timer::interrupt_id(cpu), cpu);
/* as primary processor create the core main thread */
if (Processor::primary_id() == processor_id)
{
/* get stack memory that fullfills the constraints for core stacks */
enum {
STACK_ALIGNM = 1 << Genode::CORE_STACK_ALIGNM_LOG2,
STACK_SIZE = DEFAULT_STACK_SIZE,
};
static_assert(STACK_SIZE <= STACK_ALIGNM - sizeof(Core_thread_id),
"stack size does not fit stack alignment of core");
static char s[STACK_SIZE] __attribute__((aligned(STACK_ALIGNM)));
/* provide thread ident at the aligned base of the stack */
*(Core_thread_id *)s = 0;
/* start thread with stack pointer at the top of stack */
static Native_utcb utcb;
static Thread t(Cpu_priority::max, "core");
_main_thread_id = t.id();
_main_thread_utcb = &utcb;
_main_thread_utcb->start_info()->init(t.id(), Genode::Native_capability());
t.ip = (addr_t)CORE_MAIN;;
t.sp = (addr_t)s + STACK_SIZE;
t.init(processor, core_pd(), &utcb, 1);
/* initialize interrupt objects */
static Genode::uint8_t _irqs[Pic::NR_OF_IRQ * sizeof(Irq)];
for (unsigned i = 0; i < Pic::NR_OF_IRQ; i++) {
if (private_interrupt(i)) { continue; }
new (&_irqs[i * sizeof(Irq)]) Irq(i);
}
/* kernel initialization finished */
Genode::printf("kernel initialized\n");
test();
}
/* do further initialization only as primary CPU */
if (Cpu::primary_id() != cpu) { return; }
init_kernel_mp_primary();
}
@ -332,13 +335,8 @@ extern "C" void init_kernel_multiprocessor()
*/
extern "C" void kernel()
{
/* ensure that no other processor accesses kernel data while we do */
data_lock().lock();
/* determine local processor object and let it handle its exception */
unsigned const processor_id = Processor::executing_id();
Processor * const processor = processor_pool()->processor(processor_id);
processor->exception();
cpu_pool()->cpu(Cpu::executing_id())->exception();
}

View File

@ -1,196 +0,0 @@
/*
* \brief A multiplexable common instruction processor
* \author Martin Stein
* \author Stefan Kalkowski
* \date 2014-01-14
*/
/*
* Copyright (C) 2014 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/* core includes */
#include <kernel/processor.h>
#include <kernel/kernel.h>
#include <kernel/thread.h>
#include <kernel/irq.h>
#include <pic.h>
#include <timer.h>
using namespace Kernel;
namespace Kernel
{
/**
* Lists all pending domain updates
*/
class Processor_domain_update_list;
Pic * pic();
Timer * timer();
Processor_pool * processor_pool() {
return unmanaged_singleton<Processor_pool>(); }
}
class Kernel::Processor_domain_update_list
: public Double_list_typed<Processor_domain_update>
{
typedef Processor_domain_update Update;
public:
/**
* Perform all pending domain updates on the executing processor
*/
void do_each() { for_each([] (Update * const u) { u->_do(); }); }
};
namespace Kernel
{
/**
* Return singleton of the processor domain-udpate list
*/
Processor_domain_update_list * processor_domain_update_list() {
return unmanaged_singleton<Processor_domain_update_list>(); }
}
/*************
** Cpu_job **
*************/
Cpu_job::~Cpu_job() { if (_cpu) { _cpu->scheduler()->remove(this); } }
void Cpu_job::_schedule() { _cpu->schedule(this); }
void Cpu_job::_unschedule()
{
assert(_cpu->id() == Processor::executing_id());
_cpu->scheduler()->unready(this);
}
void Cpu_job::_yield()
{
assert(_cpu->id() == Processor::executing_id());
_cpu->scheduler()->yield();
}
void Cpu_job::_interrupt(unsigned const processor_id)
{
/* determine handling for specific interrupt */
unsigned irq_id;
Pic * const ic = pic();
if (ic->take_request(irq_id)) {
/* check wether the interrupt is a processor-scheduling timeout */
if (!_cpu->timer_irq(irq_id)) {
/* check wether the interrupt is our inter-processor interrupt */
if (ic->is_ip_interrupt(irq_id, processor_id)) {
processor_domain_update_list()->do_each();
_cpu->ip_interrupt_handled();
/* after all it must be a user interrupt */
} else {
/* try to inform the user interrupt-handler */
Irq::occurred(irq_id);
}
}
}
/* end interrupt request at controller */
ic->finish_request();
}
void Cpu_job::affinity(Processor * const cpu)
{
_cpu = cpu;
_cpu->scheduler()->insert(this);
}
/**************
** Cpu_idle **
**************/
Cpu_idle::Cpu_idle(Processor * const cpu) : Cpu_job(Cpu_priority::min)
{
Cpu_job::cpu(cpu);
cpu_exception = RESET;
ip = (addr_t)&_main;
sp = (addr_t)&_stack[stack_size];
init_thread((addr_t)core_pd()->translation_table(), core_pd()->id());
}
void Cpu_idle::proceed(unsigned const cpu) { mtc()->continue_user(this, cpu); }
/***************
** Processor **
***************/
void Processor::schedule(Job * const job)
{
if (_id == executing_id()) { _scheduler.ready(job); }
else if (_scheduler.ready_check(job)) { trigger_ip_interrupt(); }
}
void Processor::trigger_ip_interrupt()
{
if (!_ip_interrupt_pending) {
pic()->trigger_ip_interrupt(_id);
_ip_interrupt_pending = true;
}
}
/*****************************
** Processor_domain_update **
*****************************/
void Processor_domain_update::_do()
{
/* perform domain update locally and get pending bit */
unsigned const processor_id = Processor::executing_id();
if (!_pending[processor_id]) { return; }
_domain_update();
_pending[processor_id] = false;
/* check wether there are still processors pending */
unsigned i = 0;
for (; i < PROCESSORS && !_pending[i]; i++) { }
if (i < PROCESSORS) { return; }
/* as no processors pending anymore, end the domain update */
processor_domain_update_list()->remove(this);
_processor_domain_update_unblocks();
}
bool Processor_domain_update::_do_global(unsigned const domain_id)
{
/* perform locally and leave it at that if in uniprocessor mode */
_domain_id = domain_id;
_domain_update();
if (PROCESSORS == 1) { return false; }
/* inform other processors and block until they are done */
processor_domain_update_list()->insert_tail(this);
unsigned const processor_id = Processor::executing_id();
for (unsigned i = 0; i < PROCESSORS; i++) {
if (i == processor_id) { continue; }
_pending[i] = true;
processor_pool()->processor(i)->trigger_ip_interrupt();
}
return true;
}

View File

@ -152,12 +152,12 @@ Thread::Thread(unsigned const priority, char const * const label)
{ cpu_exception = RESET; }
void Thread::init(Processor * const processor, Pd * const pd,
void Thread::init(Cpu * const cpu, Pd * const pd,
Native_utcb * const utcb_phys, bool const start)
{
assert(_state == AWAITS_START)
Cpu_job::affinity(processor);
Cpu_job::affinity(cpu);
_utcb_phys = utcb_phys;
/* join protection domain */
@ -168,10 +168,8 @@ void Thread::init(Processor * const processor, Pd * const pd,
if (START_VERBOSE) {
Genode::printf("start thread %u '%s' in program %u '%s' ",
id(), label(), pd_id(), pd_label());
if (PROCESSORS) {
Genode::printf("on processor %u/%u ",
processor->id(), PROCESSORS);
}
if (NR_OF_CPUS) {
Genode::printf("on CPU %u/%u ", cpu->id(), NR_OF_CPUS); }
Genode::printf("\n");
}
/* start execution */
@ -182,7 +180,7 @@ void Thread::init(Processor * const processor, Pd * const pd,
void Thread::_stop() { _unschedule(STOPPED); }
void Thread::exception(unsigned const processor_id)
void Thread::exception(unsigned const cpu)
{
switch (cpu_exception) {
case SUPERVISOR_CALL:
@ -195,10 +193,10 @@ void Thread::exception(unsigned const processor_id)
_mmu_exception();
return;
case INTERRUPT_REQUEST:
_interrupt(processor_id);
_interrupt(cpu);
return;
case FAST_INTERRUPT_REQUEST:
_interrupt(processor_id);
_interrupt(cpu);
return;
case UNDEFINED_INSTRUCTION:
if (_cpu->retry_undefined_instr(&_lazy_state)) { return; }
@ -222,10 +220,7 @@ void Thread::_receive_yielded_cpu()
}
void Thread::proceed(unsigned const processor_id)
{
mtc()->continue_user(this, processor_id);
}
void Thread::proceed(unsigned const cpu) { mtc()->continue_user(this, cpu); }
char const * Kernel::Thread::pd_label() const
@ -263,7 +258,7 @@ void Thread::_call_bin_pd()
pd->~Pd();
/* clean up buffers of memory management */
Processor::flush_tlb_by_pid(pd->id());
Cpu::flush_tlb_by_pid(pd->id());
user_arg_0(0);
}
@ -295,32 +290,28 @@ void Thread::_call_bin_thread()
void Thread::_call_start_thread()
{
/* lookup thread */
unsigned const thread_id = user_arg_1();
Thread * const thread = Thread::pool()->object(thread_id);
Thread * const thread = Thread::pool()->object(user_arg_1());
if (!thread) {
PWRN("failed to lookup thread");
user_arg_0(0);
return;
}
/* lookup processor */
unsigned const processor_id = user_arg_2();
Processor * const processor = processor_pool()->processor(processor_id);
if (!processor) {
PWRN("failed to lookup processor");
/* lookup CPU */
Cpu * const cpu = cpu_pool()->cpu(user_arg_2());
if (!cpu) {
PWRN("failed to lookup CPU");
user_arg_0(0);
return;
}
/* lookup domain */
unsigned const pd_id = user_arg_3();
Pd * const pd = Pd::pool()->object(pd_id);
Pd * const pd = Pd::pool()->object(user_arg_3());
if (!pd) {
PWRN("failed to lookup domain");
user_arg_0(0);
return;
}
/* start thread */
Native_utcb * const utcb = (Native_utcb *)user_arg_4();
thread->init(processor, pd, utcb, 1);
thread->init(cpu, pd, (Native_utcb *)user_arg_4(), 1);
user_arg_0((Call_ret)thread->_pd->translation_table());
}
@ -371,7 +362,7 @@ void Thread::_call_resume_local_thread()
void Thread_event::_signal_acknowledged()
{
Processor::tlb_insertions();
Cpu::tlb_insertions();
_thread->_resume();
}
@ -530,10 +521,8 @@ void Thread::_call_access_thread_regs()
}
void Thread::_call_update_pd()
{
if (Processor_domain_update::_do_global(user_arg_1())) { _pause(); }
}
void Thread::_call_update_pd() {
if (Cpu_domain_update::_do_global(user_arg_1())) { _pause(); } }
void Thread::_call_update_data_region()
@ -548,12 +537,12 @@ void Thread::_call_update_data_region()
* until then we apply operations to caches as a whole instead.
*/
if (!_core()) {
Processor::flush_data_caches();
Cpu::flush_data_caches();
return;
}
auto base = (addr_t)user_arg_1();
auto const size = (size_t)user_arg_2();
Processor::flush_data_caches_by_virt_region(base, size);
Cpu::flush_data_caches_by_virt_region(base, size);
}
@ -569,14 +558,14 @@ void Thread::_call_update_instr_region()
* until then we apply operations to caches as a whole instead.
*/
if (!_core()) {
Processor::flush_data_caches();
Processor::invalidate_instr_caches();
Cpu::flush_data_caches();
Cpu::invalidate_instr_caches();
return;
}
auto base = (addr_t)user_arg_1();
auto const size = (size_t)user_arg_2();
Processor::flush_data_caches_by_virt_region(base, size);
Processor::invalidate_instr_caches_by_virt_region(base, size);
Cpu::flush_data_caches_by_virt_region(base, size);
Cpu::invalidate_instr_caches_by_virt_region(base, size);
}

View File

@ -245,7 +245,7 @@ bool Genode::unmap_local(addr_t virt_addr, size_t num_pages)
tt->remove_translation(virt_addr, num_pages * get_page_size(),
Kernel::core_pd()->platform_pd()->page_slab());
/* update translation caches of all processors */
/* update translation caches of all CPUs */
Kernel::update_pd(Kernel::core_pd()->id());
return true;
} catch(...) {

View File

@ -200,14 +200,12 @@ int Platform_thread::start(void * const ip, void * const sp)
PERR("failed to initialize thread registers");
return -1;
}
/* determine kernel name of targeted processor */
unsigned processor_id;
if (_location.valid()) { processor_id = _location.xpos(); }
else { processor_id = Cpu::primary_id(); }
/* start executing new thread */
unsigned const cpu =
_location.valid() ? _location.xpos() : Cpu::primary_id();
_utcb_core_addr->start_info()->init(_id, _utcb);
if (!Kernel::start_thread(_id, processor_id, _pd->id(), _utcb_core_addr)) {
if (!Kernel::start_thread(_id, cpu, _pd->id(), _utcb_core_addr)) {
PERR("failed to start thread");
return -1;
}

View File

@ -44,7 +44,7 @@ void Rm_client::unmap(addr_t, addr_t virt_base, size_t size)
}
tt->remove_translation(virt_base, size,pt->pd()->page_slab());
/* update translation caches of all processors */
/* update translation caches of all CPUs */
Kernel::update_pd(pt->pd()->id());
}

View File

@ -36,9 +36,9 @@
.section ".text.crt0"
/****************************************
** Startup code for primary processor **
****************************************/
/**********************************
** Startup code for primary CPU **
**********************************/
.global _start
_start:
@ -70,21 +70,21 @@
add sp, r0, r1
/* uniprocessor kernel-initialization which activates multiprocessor */
bl init_kernel_uniprocessor
bl init_kernel_up
/***************************************************
** Startup code that is common to all processors **
***************************************************/
/*********************************************
** Startup code that is common to all CPUs **
*********************************************/
.global _start_secondary_processors
_start_secondary_processors:
.global _start_secondary_cpus
_start_secondary_cpus:
/* setup multiprocessor-aware kernel stack-pointer */
_get_constraints_of_kernel_stacks r0, r1
_init_kernel_sp r0, r1
/* do multiprocessor kernel-initialization */
bl init_kernel_multiprocessor
bl init_kernel_mp
/* call the kernel main-routine */
bl kernel

View File

@ -21,7 +21,7 @@
** Constants **
***************/
/* hardware names of processor modes */
/* hardware names of CPU modes */
.set USR_MODE, 16
.set FIQ_MODE, 17
.set IRQ_MODE, 18
@ -38,48 +38,48 @@
************/
/**
* Determine the base of the client context of the executing processor
* Determine the base of the client context of the executing CPU
*
* \param target_reg register that shall receive the base pointer
* \param buf_reg register that can be polluted by the macro
*/
.macro _get_client_context_ptr target_reg, buf_reg
/* get kernel name of processor */
_get_processor_id \buf_reg
/* get kernel name of CPU */
_get_cpu_id \buf_reg
/* multiply processor name with pointer size to get offset of pointer */
/* multiply CPU name with pointer size to get offset of pointer */
mov \target_reg, #CONTEXT_PTR_SIZE
mul \buf_reg, \buf_reg, \target_reg
/* get base of the pointer array */
adr \target_reg, _mt_client_context_ptr
/* add offset and base to get processor-local pointer */
/* add offset and base to get CPU-local pointer */
add \target_reg, \target_reg, \buf_reg
ldr \target_reg, [\target_reg]
.endm
/**
* Determine the base of the globally mapped buffer of the executing processor
* Determine the base of the globally mapped buffer of the executing CPU
*
* \param target_reg register that shall receive the base pointer
* \param buf_reg register that can be polluted by the macro
*/
.macro _get_buffer_ptr target_reg, buf_reg
/* get kernel name of processor */
_get_processor_id \buf_reg
/* get kernel name of CPU */
_get_cpu_id \buf_reg
/* multiply processor name with buffer size to get offset of buffer */
/* multiply CPU name with buffer size to get offset of buffer */
mov \target_reg, #BUFFER_SIZE
mul \buf_reg, \buf_reg, \target_reg
/* get base of the buffer array */
adr \target_reg, _mt_buffer
/* add offset and base to get processor-local buffer */
/* add offset and base to get CPU-local buffer */
add \target_reg, \target_reg, \buf_reg
.endm
@ -153,7 +153,7 @@
/*
* The sp in svc mode still contains the base of the globally mapped buffer
* of this processor. Hence go to svc mode, buffer user r0-r2, and make
* of this CPU. Hence go to svc mode, buffer user r0-r2, and make
* buffer pointer available to all modes
*/
.if \exception_type != RST_TYPE && \exception_type != SVC_TYPE
@ -426,7 +426,7 @@
.global _mt_user_entry_pic
_mt_user_entry_pic:
/* get user context and globally mapped buffer of this processor */
/* get user context and globally mapped buffer of this CPU */
_get_client_context_ptr lr, r0
_get_buffer_ptr sp, r0

View File

@ -32,7 +32,7 @@ void Kernel::init_trustzone(Pic * pic)
using namespace Genode;
/* check for compatibility */
if (PROCESSORS > 1) {
if (NR_OF_CPUS > 1) {
PERR("trustzone not supported with multiprocessing");
return;
}

View File

@ -30,7 +30,7 @@ extern int _mon_kernel_entry;
void Kernel::init_trustzone(Pic * pic)
{
/* check for compatibility */
if (PROCESSORS > 1) {
if (NR_OF_CPUS > 1) {
PERR("trustzone not supported with multiprocessing");
return;
}