/*
 * \brief  Implementation of the CPU session interface
 * \author Christian Prochaska
 * \date   2011-04-28
 */

/*
 * Copyright (C) 2011-2017 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU Affero General Public License version 3.
 */

/* Genode includes */
#include <base/env.h>
#include <base/log.h>
#include <base/sleep.h>
#include <cpu_session_component.h>
#include <util/list.h>

/* GDB monitor includes */
#include "cpu_thread_component.h"

/* libc includes */
#include <sys/signal.h>

/* genode-low.cc */
extern void genode_remove_thread(unsigned long lwpid);

using namespace Genode;
using namespace Gdb_monitor;

Cpu_session &Cpu_session_component::parent_cpu_session()
{
	return _parent_cpu_session;
}


Rpc_entrypoint &Cpu_session_component::thread_ep()
{
	return _ep;
}


Entrypoint &Cpu_session_component::signal_ep()
{
	return _signal_ep;
}


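/**
 * Return the capability of the thread with the given lwpid
 *
 * Returns an invalid capability if no matching thread exists.
 */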
Thread_capability Cpu_session_component::thread_cap(unsigned long lwpid)
{
	Cpu_thread_component *cpu_thread = _thread_list.first();
	while (cpu_thread) {
		if (cpu_thread->lwpid() == lwpid)
			return cpu_thread->thread_cap();
		cpu_thread = cpu_thread->next();
	}
	return Thread_capability();
}


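/**
 * Look up the thread object for a given lwpid
 *
 * Returns a nullptr if the lwpid is unknown.
 */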
Cpu_thread_component *Cpu_session_component::lookup_cpu_thread(unsigned long lwpid)
{
	Cpu_thread_component *cpu_thread = _thread_list.first();
	while (cpu_thread) {
		if (cpu_thread->lwpid() == lwpid)
			return cpu_thread;
		cpu_thread = cpu_thread->next();
	}
	return nullptr;
}


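/**
 * Look up the thread object for a given thread capability
 *
 * The capabilities are compared by their local names. Returns a nullptr
 * if the capability does not refer to a thread of this session.
 */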
Cpu_thread_component *Cpu_session_component::lookup_cpu_thread(Thread_capability thread_cap)
{
	Cpu_thread_component *cpu_thread = _thread_list.first();
	while (cpu_thread) {
		if (cpu_thread->thread_cap().local_name() == thread_cap.local_name())
			return cpu_thread;
		cpu_thread = cpu_thread->next();
	}
	return nullptr;
}


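/*
 * The following two accessors expect the given capability to belong to a
 * thread of this session; for an unknown capability, lookup_cpu_thread()
 * returns a nullptr, which would be dereferenced here.
 */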
unsigned long Cpu_session_component::lwpid(Thread_capability thread_cap)
{
	return lookup_cpu_thread(thread_cap)->lwpid();
}


int Cpu_session_component::signal_pipe_read_fd(Thread_capability thread_cap)
{
	return lookup_cpu_thread(thread_cap)->signal_pipe_read_fd();
}


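/**
 * Pause the thread and deliver the given signal by submitting the
 * matching signal context
 *
 * Only SIGSTOP and SIGINT are supported; the function returns 1 on
 * success and 0 for an unsupported signal.
 */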
int Cpu_session_component::send_signal(Thread_capability thread_cap,
                                       int signo)
{
	Cpu_thread_component *cpu_thread = lookup_cpu_thread(thread_cap);

	cpu_thread->pause();

	switch (signo) {
	case SIGSTOP:
		Signal_transmitter(cpu_thread->sigstop_signal_context_cap()).submit();
		return 1;
	case SIGINT:
		Signal_transmitter(cpu_thread->sigint_signal_context_cap()).submit();
		return 1;
	default:
		error("unexpected signal ", signo);
		return 0;
	}
}


/*
 * This function delivers a SIGSEGV to the first thread with an unresolved
 * page fault that it finds. Multiple page-faulted threads are currently
 * not supported.
 */
void Cpu_session_component::handle_unresolved_page_fault()
{
	/*
	 * It can happen that the thread state of the thread which caused the
	 * page fault is not accessible yet. In that case, we'll retry until
	 * it is accessible.
	 */
	while (1) {

		Thread_capability thread_cap = first();

		while (thread_cap.valid()) {

			try {
				Cpu_thread_component *cpu_thread = lookup_cpu_thread(thread_cap);
				Thread_state thread_state = cpu_thread->state();

				if (thread_state.unresolved_page_fault) {

					/*
					 * On base-foc it is necessary to pause the thread before
					 * IP and SP are available in the thread state.
					 */
					cpu_thread->pause();

					cpu_thread->deliver_signal(SIGSEGV);

					return;
				}
			} catch (Cpu_thread::State_access_failed) { }

			thread_cap = next(thread_cap);
		}
	}
}


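/*
 * Accessors for the flag evaluated on thread startup to decide whether a
 * newly created thread must pause right away; toggled by
 * pause_all_threads() and resume_all_threads() with
 * 'stop_new_threads_lock()' held.
 */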
void Cpu_session_component::stop_new_threads(bool stop)
{
	_stop_new_threads = stop;
}


bool Cpu_session_component::stop_new_threads()
{
	return _stop_new_threads;
}


Lock &Cpu_session_component::stop_new_threads_lock()
{
	return _stop_new_threads_lock;
}


int Cpu_session_component::handle_initial_breakpoint(unsigned long lwpid)
{
	Cpu_thread_component *cpu_thread = _thread_list.first();
	while (cpu_thread) {
		if (cpu_thread->lwpid() == lwpid)
			return cpu_thread->handle_initial_breakpoint();
		cpu_thread = cpu_thread->next();
	}
	return 0;
}


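/**
 * Pause all threads of the session and request that threads created from
 * now on start in the paused state as well
 */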
void Cpu_session_component::pause_all_threads()
{
	Lock::Guard stop_new_threads_lock_guard(stop_new_threads_lock());

	stop_new_threads(true);

	for (Cpu_thread_component *cpu_thread = _thread_list.first();
	     cpu_thread;
	     cpu_thread = cpu_thread->next()) {

		cpu_thread->pause();
	}
}


void Cpu_session_component::resume_all_threads()
{
	Lock::Guard stop_new_threads_guard(stop_new_threads_lock());

	stop_new_threads(false);

	for (Cpu_thread_component *cpu_thread = _thread_list.first();
	     cpu_thread;
	     cpu_thread = cpu_thread->next()) {

		cpu_thread->single_step(false);
		cpu_thread->resume();
	}
}


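/*
 * Capability-based iteration over the threads of this session, as used by
 * handle_unresolved_page_fault() above
 */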
Thread_capability Cpu_session_component::first()
{
	Cpu_thread_component *cpu_thread = _thread_list.first();
	if (cpu_thread)
		return cpu_thread->thread_cap();
	else
		return Thread_capability();
}


Thread_capability Cpu_session_component::next(Thread_capability thread_cap)
{
	Cpu_thread_component *next_cpu_thread = lookup_cpu_thread(thread_cap)->next();
	if (next_cpu_thread)
		return next_cpu_thread->thread_cap();
	else
		return Thread_capability();
}


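/**
 * Create a new thread, wrapped in a 'Cpu_thread_component' that is
 * appended to the local thread list
 *
 * Note that the 'pd' argument is ignored; the thread is created in the
 * core PD ('_core_pd') instead.
 */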
Thread_capability Cpu_session_component::create_thread(Capability<Pd_session> pd,
                                                       Cpu_session::Name const &name,
                                                       Affinity::Location affinity,
                                                       Weight weight,
                                                       addr_t utcb)
{
	Cpu_thread_component *cpu_thread =
		new (_md_alloc) Cpu_thread_component(*this, _core_pd, name,
		                                     affinity, weight, utcb,
		                                     _new_thread_pipe_write_end,
		                                     _breakpoint_len,
		                                     _breakpoint_data);

	_thread_list.append(cpu_thread);

	return cpu_thread->cap();
}


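/**
 * Remove the thread from the local bookkeeping and destroy it
 *
 * If the thread is already known to GDB (lwpid set), it is detached via
 * genode_remove_thread() first; the actual thread destruction is
 * delegated to the parent CPU session.
 */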
void Cpu_session_component::kill_thread(Thread_capability thread_cap)
{
	Cpu_thread_component *cpu_thread = lookup_cpu_thread(thread_cap);

	if (cpu_thread) {
		if (cpu_thread->lwpid())
			genode_remove_thread(cpu_thread->lwpid());
		_thread_list.remove(cpu_thread);
		destroy(_md_alloc, cpu_thread);
	} else
		error(__PRETTY_FUNCTION__, ": "
		      "could not find thread info for the given thread capability");

	_parent_cpu_session.kill_thread(thread_cap);
}


void Cpu_session_component::exception_sigh(Signal_context_capability handler)
{
	_parent_cpu_session.exception_sigh(handler);
}


Affinity::Space Cpu_session_component::affinity_space() const
{
	return _parent_cpu_session.affinity_space();
}


Dataspace_capability Cpu_session_component::trace_control()
{
	return _parent_cpu_session.trace_control();
}


Capability<Cpu_session::Native_cpu> Cpu_session_component::native_cpu()
{
	return _native_cpu_cap;
}


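/*
 * The constructor opens the parent CPU session with the original session
 * arguments and affinity, and registers this component at the RPC
 * entrypoint; the destructor destroys all remaining thread objects and
 * reverts these steps.
 */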
Cpu_session_component::Cpu_session_component(Env &env,
                                             Rpc_entrypoint &ep,
                                             Allocator &md_alloc,
                                             Pd_session_capability core_pd,
                                             Entrypoint &signal_ep,
                                             const char *args,
                                             Affinity const &affinity,
                                             int const new_thread_pipe_write_end,
                                             int const breakpoint_len,
                                             unsigned char const *breakpoint_data)
: _env(env),
  _ep(ep),
  _md_alloc(md_alloc),
  _core_pd(core_pd),
  _parent_cpu_session(env.session<Cpu_session>(_id_space_element.id(), args, affinity), *this),
  _signal_ep(signal_ep),
  _new_thread_pipe_write_end(new_thread_pipe_write_end),
  _breakpoint_len(breakpoint_len),
  _breakpoint_data(breakpoint_data),
  _native_cpu_cap(_setup_native_cpu())
{
	_ep.manage(this);
}


Cpu_session_component::~Cpu_session_component()
{
	for (Cpu_thread_component *cpu_thread = _thread_list.first();
	     cpu_thread; cpu_thread = _thread_list.first()) {
		_thread_list.remove(cpu_thread);
		destroy(_md_alloc, cpu_thread);
	}

	_cleanup_native_cpu();

	_ep.dissolve(this);
}


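/*
 * CPU-quota accounting is not supported by this intermediate CPU session;
 * the quota-related functions are merely stubs.
 */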
int Cpu_session_component::ref_account(Cpu_session_capability) { return -1; }

int Cpu_session_component::transfer_quota(Cpu_session_capability, size_t) { return -1; }

Cpu_session::Quota Cpu_session_component::quota() { return Quota(); }