2016-11-07 13:59:37 +01:00
|
|
|
/*
 * \brief  Multiplexing one time source amongst different timeout subjects
 * \author Martin Stein
 * \date   2016-11-04
 */
|
|
|
|
|
|
|
|
/*
 * Copyright (C) 2016-2017 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU Affero General Public License version 3.
 */
|
|
|
|
|
|
|
|
/* Genode includes */
#include <timer/timeout.h>
|
2016-11-07 13:59:37 +01:00
|
|
|
|
|
|
|
using namespace Genode;
|
|
|
|
|
|
|
|
|
|
|
|
/*************
|
|
|
|
** Timeout **
|
|
|
|
*************/
|
|
|
|
|
|
|
|
void Timeout::schedule_periodic(Microseconds duration, Handler &handler)
{
	/* remember whom to notify and that this timeout re-arms itself */
	_alarm.periodic = true;
	_alarm.handler  = &handler;

	/* hand the timeout over to the scheduler that multiplexes the time source */
	_alarm.timeout_scheduler._schedule_periodic(*this, duration);
}
|
|
|
|
|
os/timer: interpolate time via timestamps
Previously, the Genode::Timer::curr_time always used the
Timer_session::elapsed_ms RPC as back end. Now, Genode::Timer reads
this remote time only in a periodic fashion independently from the calls
to Genode::Timer::curr_time. If now one calls Genode::Timer::curr_time,
the function takes the last read remote time value and adapts it using
the timestamp difference since the remote-time read. The conversion
factor from timestamps to time is estimated on every remote-time read
using the last read remote-time value and the timestamp difference since
the last remote time read.
This commit also re-works the timeout test. The test now has two stages.
In the first stage, it tests fast polling of the
Genode::Timer::curr_time. This stage checks the error between locally
interpolated and timer-driver time as well as wether the locally
interpolated time is monotone and sufficiently homogeneous. In the
second stage several periodic and one-shot timeouts are scheduled at
once. This stage checks if the timeouts trigger sufficiently precise.
This commit adds the new Kernel::time syscall to base-hw. The syscall is
solely used by the Genode::Timer on base-hw as substitute for the
timestamp. This is because on ARM, the timestamp function uses the ARM
performance counter that stops counting when the WFI (wait for
interrupt) instruction is active. This instruction, however is used by
the base-hw idle contexts that get active when no user thread needs to
be scheduled. Thus, the ARM performance counter is not a good choice for
time interpolation and we use the kernel internal time instead.
With this commit, the timeout library becomes a basic library. That means
that it is linked against the LDSO which then provides it to the program it
serves. Furthermore, you can't use the timeout library anymore without the
LDSO because through the kernel-dependent LDSO make-files we can achieve a
kernel-dependent timeout implementation.
This commit introduces a structured Duration type that shall successively
replace the use of Microseconds, Milliseconds, and integer types for duration
values.
Open issues:
* The timeout test fails on Raspberry PI because of precision errors in the
first stage. However, this does not render the framework unusable in general
on the RPI but merely is an issue when speaking of microseconds precision.
* If we run on ARM with another Kernel than HW the timestamp speed may
continuously vary from almost 0 up to CPU speed. The Timer, however,
only uses interpolation if the timestamp speed remained stable (12.5%
tolerance) for at least 3 observation periods. Currently, one period is
100ms, so its 300ms. As long as this is not the case,
Timer_session::elapsed_ms is called instead.
Anyway, it might happen that the CPU load was stable for some time so
interpolation becomes active and now the timestamp speed drops. In the
worst case, we would now have 100ms of slowed down time. The bad thing
about it would be, that this also affects the timeout of the period.
Thus, it might "freeze" the local time for more than 100ms.
On the other hand, if the timestamp speed suddenly raises after some
stable time, interpolated time can get too fast. This would shorten the
period but nonetheless may result in drifting away into the far future.
Now we would have the problem that we can't deliver the real time
anymore until it has caught up because the output of Timer::curr_time
shall be monotone. So, effectively local time might "freeze" again for
more than 100ms.
It would be a solution to not use the Trace::timestamp on ARM w/o HW but
a function whose return value causes the Timer to never use
interpolation because of its stability policy.
Fixes #2400
2017-04-22 00:52:23 +02:00
|
|
|
|
2016-11-07 13:59:37 +01:00
|
|
|
void Timeout::schedule_one_shot(Microseconds duration, Handler &handler)
{
	/* remember whom to notify; a one-shot timeout fires only once */
	_alarm.periodic = false;
	_alarm.handler  = &handler;

	/* hand the timeout over to the scheduler that multiplexes the time source */
	_alarm.timeout_scheduler._schedule_one_shot(*this, duration);
}
|
|
|
|
|
os/timer: interpolate time via timestamps
Previously, the Genode::Timer::curr_time always used the
Timer_session::elapsed_ms RPC as back end. Now, Genode::Timer reads
this remote time only in a periodic fashion independently from the calls
to Genode::Timer::curr_time. If now one calls Genode::Timer::curr_time,
the function takes the last read remote time value and adapts it using
the timestamp difference since the remote-time read. The conversion
factor from timestamps to time is estimated on every remote-time read
using the last read remote-time value and the timestamp difference since
the last remote time read.
This commit also re-works the timeout test. The test now has two stages.
In the first stage, it tests fast polling of the
Genode::Timer::curr_time. This stage checks the error between locally
interpolated and timer-driver time as well as wether the locally
interpolated time is monotone and sufficiently homogeneous. In the
second stage several periodic and one-shot timeouts are scheduled at
once. This stage checks if the timeouts trigger sufficiently precise.
This commit adds the new Kernel::time syscall to base-hw. The syscall is
solely used by the Genode::Timer on base-hw as substitute for the
timestamp. This is because on ARM, the timestamp function uses the ARM
performance counter that stops counting when the WFI (wait for
interrupt) instruction is active. This instruction, however is used by
the base-hw idle contexts that get active when no user thread needs to
be scheduled. Thus, the ARM performance counter is not a good choice for
time interpolation and we use the kernel internal time instead.
With this commit, the timeout library becomes a basic library. That means
that it is linked against the LDSO which then provides it to the program it
serves. Furthermore, you can't use the timeout library anymore without the
LDSO because through the kernel-dependent LDSO make-files we can achieve a
kernel-dependent timeout implementation.
This commit introduces a structured Duration type that shall successively
replace the use of Microseconds, Milliseconds, and integer types for duration
values.
Open issues:
* The timeout test fails on Raspberry PI because of precision errors in the
first stage. However, this does not render the framework unusable in general
on the RPI but merely is an issue when speaking of microseconds precision.
* If we run on ARM with another Kernel than HW the timestamp speed may
continuously vary from almost 0 up to CPU speed. The Timer, however,
only uses interpolation if the timestamp speed remained stable (12.5%
tolerance) for at least 3 observation periods. Currently, one period is
100ms, so its 300ms. As long as this is not the case,
Timer_session::elapsed_ms is called instead.
Anyway, it might happen that the CPU load was stable for some time so
interpolation becomes active and now the timestamp speed drops. In the
worst case, we would now have 100ms of slowed down time. The bad thing
about it would be, that this also affects the timeout of the period.
Thus, it might "freeze" the local time for more than 100ms.
On the other hand, if the timestamp speed suddenly raises after some
stable time, interpolated time can get too fast. This would shorten the
period but nonetheless may result in drifting away into the far future.
Now we would have the problem that we can't deliver the real time
anymore until it has caught up because the output of Timer::curr_time
shall be monotone. So, effectively local time might "freeze" again for
more than 100ms.
It would be a solution to not use the Trace::timestamp on ARM w/o HW but
a function whose return value causes the Timer to never use
interpolation because of its stability policy.
Fixes #2400
2017-04-22 00:52:23 +02:00
|
|
|
|
2016-12-20 17:01:23 +01:00
|
|
|
void Timeout::discard()
{
	/* remove the timeout from the scheduler first, then drop the handler
	 * reference so no further notification can reach a stale handler */
	_alarm.timeout_scheduler._discard(*this);
	_alarm.handler = nullptr;
}
|
|
|
|
|
|
|
|
|
|
|
|
/********************
|
|
|
|
** Timeout::Alarm **
|
|
|
|
********************/
|
|
|
|
|
2018-03-20 15:35:00 +01:00
|
|
|
bool Timeout::Alarm::_on_alarm(unsigned)
|
2016-11-07 13:59:37 +01:00
|
|
|
{
|
|
|
|
if (handler) {
|
os/timer: interpolate time via timestamps
Previously, the Genode::Timer::curr_time always used the
Timer_session::elapsed_ms RPC as back end. Now, Genode::Timer reads
this remote time only in a periodic fashion independently from the calls
to Genode::Timer::curr_time. If now one calls Genode::Timer::curr_time,
the function takes the last read remote time value and adapts it using
the timestamp difference since the remote-time read. The conversion
factor from timestamps to time is estimated on every remote-time read
using the last read remote-time value and the timestamp difference since
the last remote time read.
This commit also re-works the timeout test. The test now has two stages.
In the first stage, it tests fast polling of the
Genode::Timer::curr_time. This stage checks the error between locally
interpolated and timer-driver time as well as wether the locally
interpolated time is monotone and sufficiently homogeneous. In the
second stage several periodic and one-shot timeouts are scheduled at
once. This stage checks if the timeouts trigger sufficiently precise.
This commit adds the new Kernel::time syscall to base-hw. The syscall is
solely used by the Genode::Timer on base-hw as substitute for the
timestamp. This is because on ARM, the timestamp function uses the ARM
performance counter that stops counting when the WFI (wait for
interrupt) instruction is active. This instruction, however is used by
the base-hw idle contexts that get active when no user thread needs to
be scheduled. Thus, the ARM performance counter is not a good choice for
time interpolation and we use the kernel internal time instead.
With this commit, the timeout library becomes a basic library. That means
that it is linked against the LDSO which then provides it to the program it
serves. Furthermore, you can't use the timeout library anymore without the
LDSO because through the kernel-dependent LDSO make-files we can achieve a
kernel-dependent timeout implementation.
This commit introduces a structured Duration type that shall successively
replace the use of Microseconds, Milliseconds, and integer types for duration
values.
Open issues:
* The timeout test fails on Raspberry PI because of precision errors in the
first stage. However, this does not render the framework unusable in general
on the RPI but merely is an issue when speaking of microseconds precision.
* If we run on ARM with another Kernel than HW the timestamp speed may
continuously vary from almost 0 up to CPU speed. The Timer, however,
only uses interpolation if the timestamp speed remained stable (12.5%
tolerance) for at least 3 observation periods. Currently, one period is
100ms, so its 300ms. As long as this is not the case,
Timer_session::elapsed_ms is called instead.
Anyway, it might happen that the CPU load was stable for some time so
interpolation becomes active and now the timestamp speed drops. In the
worst case, we would now have 100ms of slowed down time. The bad thing
about it would be, that this also affects the timeout of the period.
Thus, it might "freeze" the local time for more than 100ms.
On the other hand, if the timestamp speed suddenly raises after some
stable time, interpolated time can get too fast. This would shorten the
period but nonetheless may result in drifting away into the far future.
Now we would have the problem that we can't deliver the real time
anymore until it has caught up because the output of Timer::curr_time
shall be monotone. So, effectively local time might "freeze" again for
more than 100ms.
It would be a solution to not use the Trace::timestamp on ARM w/o HW but
a function whose return value causes the Timer to never use
interpolation because of its stability policy.
Fixes #2400
2017-04-22 00:52:23 +02:00
|
|
|
Handler *current = handler;
|
|
|
|
if (!periodic) {
|
|
|
|
handler = nullptr;
|
|
|
|
}
|
|
|
|
current->handle_timeout(timeout_scheduler.curr_time());
|
|
|
|
}
|
2016-11-07 13:59:37 +01:00
|
|
|
return periodic;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-03-20 15:35:00 +01:00
|
|
|
Timeout::Alarm::~Alarm()
{
	/* an alarm without a scheduler was never enqueued anywhere */
	if (_scheduler == nullptr)
		return;

	_scheduler->_alarm_discard(this);
}
|
|
|
|
|
|
|
|
|
|
|
|
bool Timeout::Alarm::Raw::is_pending_at(unsigned long time, bool time_period) const
|
|
|
|
{
|
|
|
|
return (time_period == deadline_period &&
|
|
|
|
time >= deadline) ||
|
|
|
|
(time_period != deadline_period &&
|
|
|
|
time < deadline);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-11-07 13:59:37 +01:00
|
|
|
/*****************************
|
|
|
|
** Alarm_timeout_scheduler **
|
|
|
|
*****************************/
|
|
|
|
|
2017-11-15 18:10:32 +01:00
|
|
|
void Alarm_timeout_scheduler::handle_timeout(Duration duration)
{
	unsigned long const now_us = duration.trunc_to_plain_us().value;

	/* fire all alarms that are due at the current time */
	_alarm_handle(now_us);

	/* sleep either until the next deadline or for the maximum timeout */
	Alarm::Time deadline_us;
	unsigned long sleep_time_us =
		_alarm_next_deadline(&deadline_us) ? deadline_us - now_us
		                                   : _time_source.max_timeout().value;

	/* limit the maximum sleep to a more reasonable value, e.g., 60 s */
	unsigned long const max_sleep_us = 60000000;
	if (sleep_time_us > max_sleep_us)
		sleep_time_us = max_sleep_us;

	/* ensure forward progress by never programming a zero timeout */
	else if (sleep_time_us == 0)
		sleep_time_us = 1;

	_time_source.schedule_timeout(Microseconds(sleep_time_us), *this);
}
|
|
|
|
|
|
|
|
|
2017-09-29 14:12:10 +02:00
|
|
|
Alarm_timeout_scheduler::Alarm_timeout_scheduler(Time_source &time_source,
                                                 Microseconds min_handle_period)
:
	_time_source(time_source)
{
	Alarm::Time const deadline = _now + min_handle_period.value;

	/* the deadline wrapped around if it is numerically behind the current time */
	bool const wrapped = _now > deadline;

	_min_handle_period.period          = min_handle_period.value;
	_min_handle_period.deadline        = deadline;
	_min_handle_period.deadline_period = wrapped ? !_now_period : _now_period;
}
|
|
|
|
|
|
|
|
|
|
|
|
Alarm_timeout_scheduler::~Alarm_timeout_scheduler()
{
	/* serialize against concurrent modifications of the alarm list */
	Lock::Guard lock_guard(_lock);

	/*
	 * Reset every alarm that is still enqueued so no alarm keeps a
	 * reference to this scheduler after its destruction. The successor
	 * is saved before the reset because resetting detaches the alarm.
	 */
	while (_head) {
		Alarm *next = _head->_next;
		_head->_alarm_reset();
		_head = next;
	}
}
|
os/timer: interpolate time via timestamps
Previously, the Genode::Timer::curr_time always used the
Timer_session::elapsed_ms RPC as back end. Now, Genode::Timer reads
this remote time only in a periodic fashion independently from the calls
to Genode::Timer::curr_time. If now one calls Genode::Timer::curr_time,
the function takes the last read remote time value and adapts it using
the timestamp difference since the remote-time read. The conversion
factor from timestamps to time is estimated on every remote-time read
using the last read remote-time value and the timestamp difference since
the last remote time read.
This commit also re-works the timeout test. The test now has two stages.
In the first stage, it tests fast polling of the
Genode::Timer::curr_time. This stage checks the error between locally
interpolated and timer-driver time as well as wether the locally
interpolated time is monotone and sufficiently homogeneous. In the
second stage several periodic and one-shot timeouts are scheduled at
once. This stage checks if the timeouts trigger sufficiently precise.
This commit adds the new Kernel::time syscall to base-hw. The syscall is
solely used by the Genode::Timer on base-hw as substitute for the
timestamp. This is because on ARM, the timestamp function uses the ARM
performance counter that stops counting when the WFI (wait for
interrupt) instruction is active. This instruction, however is used by
the base-hw idle contexts that get active when no user thread needs to
be scheduled. Thus, the ARM performance counter is not a good choice for
time interpolation and we use the kernel internal time instead.
With this commit, the timeout library becomes a basic library. That means
that it is linked against the LDSO which then provides it to the program it
serves. Furthermore, you can't use the timeout library anymore without the
LDSO because through the kernel-dependent LDSO make-files we can achieve a
kernel-dependent timeout implementation.
This commit introduces a structured Duration type that shall successively
replace the use of Microseconds, Milliseconds, and integer types for duration
values.
Open issues:
* The timeout test fails on Raspberry PI because of precision errors in the
first stage. However, this does not render the framework unusable in general
on the RPI but merely is an issue when speaking of microseconds precision.
* If we run on ARM with another Kernel than HW the timestamp speed may
continuously vary from almost 0 up to CPU speed. The Timer, however,
only uses interpolation if the timestamp speed remained stable (12.5%
tolerance) for at least 3 observation periods. Currently, one period is
100ms, so its 300ms. As long as this is not the case,
Timer_session::elapsed_ms is called instead.
Anyway, it might happen that the CPU load was stable for some time so
interpolation becomes active and now the timestamp speed drops. In the
worst case, we would now have 100ms of slowed down time. The bad thing
about it would be, that this also affects the timeout of the period.
Thus, it might "freeze" the local time for more than 100ms.
On the other hand, if the timestamp speed suddenly raises after some
stable time, interpolated time can get too fast. This would shorten the
period but nonetheless may result in drifting away into the far future.
Now we would have the problem that we can't deliver the real time
anymore until it has caught up because the output of Timer::curr_time
shall be monotone. So, effectively local time might "freeze" again for
more than 100ms.
It would be a solution to not use the Trace::timestamp on ARM w/o HW but
a function whose return value causes the Timer to never use
interpolation because of its stability policy.
Fixes #2400
2017-04-22 00:52:23 +02:00
|
|
|
|
|
|
|
|
|
|
|
void Alarm_timeout_scheduler::_enable()
{
	/*
	 * Program an immediate timeout at the time source. This triggers
	 * handle_timeout(), which processes due alarms and re-arms the time
	 * source for the next deadline, thereby starting the scheduling loop.
	 */
	_time_source.schedule_timeout(Microseconds(0), *this);
}
|
|
|
|
|
|
|
|
|
2016-12-20 17:01:23 +01:00
|
|
|
void Alarm_timeout_scheduler::_schedule_one_shot(Timeout &timeout,
                                                 Microseconds duration)
{
	unsigned long const now_us =
		_time_source.curr_time().trunc_to_plain_us().value;

	/* bring the scheduler's notion of time up to date before inserting */
	_alarm_handle(now_us);

	_alarm_schedule_absolute(&timeout._alarm, now_us + duration.value);

	/* if the new timeout became the next deadline, re-program the time source */
	if (_alarm_head_timeout(&timeout._alarm))
		_time_source.schedule_timeout(Microseconds(0), *this);
}
|
|
|
|
|
|
|
|
|
2016-12-20 17:01:23 +01:00
|
|
|
void Alarm_timeout_scheduler::_schedule_periodic(Timeout &timeout,
                                                 Microseconds duration)
{
	/* bring the scheduler's notion of time up to date before inserting */
	_alarm_handle(_time_source.curr_time().trunc_to_plain_us().value);

	/* periodic alarms carry a period, not an absolute deadline */
	_alarm_schedule(&timeout._alarm, duration.value);

	/* if the new alarm became the earliest one, re-arm the time source */
	if (_alarm_head_timeout(&timeout._alarm))
		_time_source.schedule_timeout(Microseconds(0), *this);
}
|
2018-03-20 15:35:00 +01:00
|
|
|
|
|
|
|
|
|
|
|
void Alarm_timeout_scheduler::_alarm_unsynchronized_enqueue(Alarm *alarm)
{
	/* an alarm must not be queued twice */
	if (alarm->_active) {
		error("trying to insert the same alarm twice!");
		return;
	}
	alarm->_active++;

	/* empty queue: the alarm becomes the only element */
	if (!_head) {
		alarm->_next = nullptr;
		_head = alarm;
		return;
	}

	/* earlier than every queued deadline: the alarm becomes the new head */
	if (alarm->_raw.is_pending_at(_head->_raw.deadline, _head->_raw.deadline_period)) {
		alarm->_next = _head;
		_head = alarm;
		return;
	}

	/* walk to the last element whose deadline precedes the new alarm */
	Alarm *prev = _head;
	while (prev->_next &&
	       prev->_next->_raw.is_pending_at(alarm->_raw.deadline,
	                                       alarm->_raw.deadline_period))
		prev = prev->_next;

	/* reached the tail: append */
	if (!prev->_next) {
		prev->_next = alarm;
		return;
	}

	/* otherwise splice the alarm in behind 'prev' */
	alarm->_next = prev->_next;
	prev->_next = alarm;
}
|
|
|
|
|
|
|
|
|
|
|
|
void Alarm_timeout_scheduler::_alarm_unsynchronized_dequeue(Alarm *alarm)
{
	/* nothing queued, nothing to remove */
	if (!_head) return;

	/* removing the head is a simple pointer swap */
	if (_head == alarm) {
		_head = alarm->_next;
		alarm->_alarm_reset();
		return;
	}

	/* locate the predecessor of 'alarm' in the queue */
	Alarm *prev = _head;
	while (prev && prev->_next != alarm)
		prev = prev->_next;

	/* the alarm is not queued at all */
	if (!prev) return;

	/* unlink the alarm and reset its queueing state */
	prev->_next = alarm->_next;
	alarm->_alarm_reset();
}
|
|
|
|
|
|
|
|
|
|
|
|
Timeout::Alarm *Alarm_timeout_scheduler::_alarm_get_pending_alarm()
{
	Lock::Guard lock_guard(_lock);

	/* no alarm queued, or the earliest one is not due yet */
	if (!_head || !_head->_raw.is_pending_at(_now, _now_period))
		return nullptr;

	/* pop the due alarm off the head of the queue */
	Alarm *pending = _head;
	_head = _head->_next;

	/*
	 * Hold the dispatch lock so that a concurrent destructor has to
	 * wait until the '_on_alarm' call for this alarm is finished.
	 */
	pending->_dispatch_lock.lock();

	/* detach the alarm from the queue state */
	pending->_next = nullptr;
	pending->_active--;

	return pending;
}
|
|
|
|
|
|
|
|
|
|
|
|
void Alarm_timeout_scheduler::_alarm_handle(Alarm::Time curr_time)
{
	/*
	 * Advance the local time value and, if the counter wrapped,
	 * flip the flag that tells in which period the counter is.
	 */
	if (_now > curr_time)
		_now_period = !_now_period;
	_now = curr_time;

	/* rate-limit alarm processing to the minimal handle period */
	if (!_min_handle_period.is_pending_at(_now, _now_period))
		return;

	Alarm::Time const next_handling = _now + _min_handle_period.period;
	_min_handle_period.deadline = next_handling;
	_min_handle_period.deadline_period =
		_now > next_handling ? !_now_period : _now_period;

	/* dispatch every alarm whose deadline has passed by now */
	for (Alarm *alarm = _alarm_get_pending_alarm(); alarm;
	     alarm = _alarm_get_pending_alarm()) {

		unsigned long triggered = 1;

		if (alarm->_raw.period) {
			Alarm::Time last_deadline = alarm->_raw.deadline;

			/* an alarm that never had a deadline counts from now */
			if (last_deadline == 0)
				last_deadline = curr_time;

			/* account for every period that elapsed meanwhile */
			triggered += (curr_time - last_deadline) / alarm->_raw.period;
		}

		/* a handler return value of 0 cancels rescheduling */
		bool const reschedule = alarm->_on_alarm(triggered);

		if (reschedule) {

			/*
			 * At this point, the alarm deadline normally is somewhere
			 * near the current time, but if the alarm had no deadline
			 * by now, initialize it with the current time.
			 */
			if (alarm->_raw.deadline == 0) {
				alarm->_raw.deadline = _now;
				alarm->_raw.deadline_period = _now_period;
			}

			/*
			 * Advance the deadline by the triggered number of periods
			 * and, if the value wraps thereby, flip its period flag.
			 */
			Alarm::Time const new_deadline =
				alarm->_raw.deadline + triggered * alarm->_raw.period;
			if (alarm->_raw.deadline > new_deadline)
				alarm->_raw.deadline_period = !alarm->_raw.deadline_period;
			alarm->_raw.deadline = new_deadline;

			/* re-insert into the queue under the list lock */
			Lock::Guard lock_guard(_lock);
			_alarm_unsynchronized_enqueue(alarm);
		}

		/* release the alarm, resume a concurrent destructor operation */
		alarm->_dispatch_lock.unlock();
	}
}
|
|
|
|
|
|
|
|
|
|
|
|
void Alarm_timeout_scheduler::_alarm_setup_alarm(Alarm &alarm, Alarm::Time period, Alarm::Time deadline)
{
	/*
	 * An alarm that already sits in the queue must be re-positioned
	 * because its deadline may have changed, i.e., when an alarm is
	 * rescheduled with a new timeout before the original one triggered.
	 */
	if (alarm._active)
		_alarm_unsynchronized_dequeue(&alarm);

	/* a deadline below the current time belongs to the next counter period */
	alarm._alarm_assign(period, deadline,
	                    _now > deadline ? !_now_period : _now_period, this);

	_alarm_unsynchronized_enqueue(&alarm);
}
|
|
|
|
|
|
|
|
|
|
|
|
void Alarm_timeout_scheduler::_alarm_schedule_absolute(Alarm *alarm, Alarm::Time timeout)
{
	Lock::Guard list_guard(_lock);

	/* a period of zero marks a one-shot alarm */
	_alarm_setup_alarm(*alarm, 0, timeout);
}
|
|
|
|
|
|
|
|
|
|
|
|
void Alarm_timeout_scheduler::_alarm_schedule(Alarm *alarm, Alarm::Time period)
{
	Lock::Guard list_guard(_lock);

	/*
	 * Refuse to schedule a periodic timeout of 0 because it would
	 * trigger infinitely in the 'handle' function. To account for the
	 * case where the alarm object was already scheduled, make sure to
	 * remove it from the queue.
	 */
	if (period == 0) {
		_alarm_unsynchronized_dequeue(alarm);
		return;
	}

	/* schedule with the first deadline already overdue */
	_alarm_setup_alarm(*alarm, period, _now);
}
|
|
|
|
|
|
|
|
|
|
|
|
void Alarm_timeout_scheduler::_alarm_discard(Alarm *alarm)
{
	/*
	 * Make sure that nobody is inside '_alarm_get_pending_alarm' when
	 * grabbing the '_dispatch_lock'. This is important when this
	 * function is called from the 'Alarm' destructor. Without the
	 * '_dispatch_lock', we could take the lock and proceed with
	 * destruction just before '_alarm_get_pending_alarm' tries to grab
	 * the lock. When the destructor is finished,
	 * '_alarm_get_pending_alarm' would proceed with operating on a
	 * dangling pointer.
	 */
	Lock::Guard list_guard(_lock);

	if (!alarm)
		return;

	Lock::Guard dispatch_guard(alarm->_dispatch_lock);
	_alarm_unsynchronized_dequeue(alarm);
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Report the deadline of the earliest queued alarm
 *
 * \param deadline  out parameter for the deadline value, may be nullptr
 *                  if the caller is only interested in whether an alarm
 *                  is queued at all
 *
 * \return  true if at least one alarm is queued
 */
bool Alarm_timeout_scheduler::_alarm_next_deadline(Alarm::Time *deadline)
{
	Lock::Guard alarm_list_lock_guard(_lock);

	if (!_head) return false;

	/*
	 * Fix: the original guarded the first write with 'if (deadline)' but
	 * then dereferenced 'deadline' unconditionally in the clamp below,
	 * crashing on a nullptr argument. Keep all dereferences inside the
	 * null check.
	 */
	if (deadline) {
		*deadline = _head->_raw.deadline;

		/* never report a deadline below the minimal handle period */
		if (*deadline < _min_handle_period.deadline)
			*deadline = _min_handle_period.deadline;
	}
	return true;
}
|