hw: fix bug in scheduler timing

Until now, the scheduling timer was refreshed with a new scheduling
timeout only when the chosen scheduling context changed. However, it
must also be refreshed when the scheduled context yields without
affecting the scheduler's choice (e.g., when the idle thread receives
a scheduling timeout, or when a thread yields without any competitor
in its priority band).

ref #1151
Martin Stein 2014-05-21 15:52:05 +02:00 committed by Norman Feske
parent cf73ae2c20
commit ccba43574f
5 changed files with 108 additions and 68 deletions
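
The gist of the change, as a minimal, self-contained C++ sketch (not the kernel
code itself): the scheduler remembers that the current occupant yielded and
reports through update_occupant() whether the processor should re-arm its
scheduling timer, so the timer is also reset when the occupant stays the same
after a yield. All names below (Toy_timer, Toy_scheduler, Client) are
illustrative stand-ins, and the single ready queue stands in for the kernel's
priority bands.

#include <cstdio>
#include <deque>

struct Toy_timer
{
	void start_one_shot(unsigned ms) { std::printf("  timer re-armed for %u ms\n", ms); }
};

struct Client { char const *name; };

class Toy_scheduler
{
	private:

		Client               *_idle;
		Client               *_occupant = nullptr;
		std::deque<Client *>  _ready;           /* one "priority band" for brevity */
		bool                  _yield = false;

		/* the timer must be re-armed on a change of occupant or after a yield */
		bool _check_update(Client *occupant)
		{
			if (_yield) { _yield = false; return true; }
			return _occupant != occupant;
		}

	public:

		Toy_scheduler(Client *idle) : _idle(idle) { }

		void insert(Client *c) { _ready.push_back(c); }

		/* analogous to yield_occupation(): remember that a yield happened */
		void yield_occupation()
		{
			_yield = true;
			if (_ready.empty()) return;
			_ready.push_back(_ready.front());
			_ready.pop_front();
		}

		/* analogous to update_occupant(bool &): report whether a timer reset is due */
		Client *update_occupant(bool &update)
		{
			Client *head = _ready.empty() ? _idle : _ready.front();
			update    = _check_update(head);
			_occupant = _ready.empty() ? nullptr : head;
			return head;
		}
};

int main()
{
	Client        idle { "idle" }, a { "A" };
	Toy_scheduler scheduler(&idle);
	Toy_timer     timer;

	scheduler.insert(&a);

	auto kernel_entry = [&] (char const *event) {
		bool update = false;
		Client *next = scheduler.update_occupant(update);
		std::printf("%s: occupant=%s update=%d\n", event, next->name, update);
		if (update)
			timer.start_one_shot(100);   /* start a new scheduling interval */
	};

	kernel_entry("initial entry");    /* occupant changes            -> reset        */
	kernel_entry("some exception");   /* same occupant, no yield     -> no reset     */
	scheduler.yield_occupation();     /* A yields without competitor                 */
	kernel_entry("timeout");          /* same occupant, but yielded  -> reset (fix)  */
}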


@@ -74,25 +74,6 @@ namespace Kernel
 	Signal_context_pool  * signal_context_pool()  { return unmanaged_singleton<Signal_context_pool>(); }
 	Signal_receiver_pool * signal_receiver_pool() { return unmanaged_singleton<Signal_receiver_pool>(); }
 
-	/**
-	 * Return singleton kernel-timer
-	 */
-	Timer * timer()
-	{
-		static Timer _object;
-		return &_object;
-	}
-
-	/**
-	 * Start a new scheduling lap
-	 */
-	void reset_scheduling_time(unsigned const processor_id)
-	{
-		unsigned const tics = timer()->ms_to_tics(USER_LAP_TIME_MS);
-		timer()->start_one_shot(tics, processor_id);
-	}
-
 	/**
 	 * Static kernel PD that describes core
 	 */
@@ -287,9 +268,13 @@ extern "C" void init_kernel_multiprocessor()
 	 */
 	perf_counter()->enable();
 
-	/* initialize interrupt controller */
-	pic()->init_processor_local();
+	/* locally initialize processor */
 	unsigned const processor_id = Processor::executing_id();
+	Processor * const processor = processor_pool()->processor(processor_id);
+	processor->init_processor_local();
+
+	/* locally initialize interrupt controller */
+	pic()->init_processor_local();
 	pic()->unmask(Timer::interrupt_id(processor_id), processor_id);
 
 	/* as primary processor create the core main thread */
@@ -315,7 +300,7 @@ extern "C" void init_kernel_multiprocessor()
 		_main_thread_utcb->start_info()->init(t.id(), Genode::Native_capability());
 		t.ip = (addr_t)CORE_MAIN;;
 		t.sp = (addr_t)s + STACK_SIZE;
-		t.init(processor_pool()->processor(processor_id), core_pd(), &utcb, 1);
+		t.init(processor, core_pd(), &utcb, 1);
 
 		/* initialize interrupt objects */
 		static Genode::uint8_t _irqs[Pic::MAX_INTERRUPT_ID * sizeof(Irq)];
@@ -326,7 +311,6 @@ extern "C" void init_kernel_multiprocessor()
 		/* kernel initialization finished */
 		Genode::printf("kernel initialized\n");
 	}
-	reset_scheduling_time(processor_id);
 }
@@ -338,31 +322,10 @@ extern "C" void kernel()
 	/* ensure that no other processor accesses kernel data while we do */
 	data_lock().lock();
 
-	/* determine local processor scheduler */
+	/* determine local processor object and let it handle its exception */
 	unsigned const processor_id = Processor::executing_id();
 	Processor * const processor = processor_pool()->processor(processor_id);
-	Scheduler<Processor_client> * const scheduler = processor->scheduler();
-
-	/*
-	 * Request the current processor occupant without any update. While this
-	 * processor was outside the kernel, another processor may have changed the
-	 * scheduling of the local activities in a way that an update would return
-	 * an occupant other than that whose exception caused the kernel entry.
-	 */
-	Processor_client * const old_client = scheduler->occupant();
-	Processor_lazy_state * const old_state = old_client->lazy_state();
-	old_client->exception(processor_id);
-
-	/*
-	 * The processor local as well as remote exception-handling may have
-	 * changed the scheduling of the local activities. Hence we must update the
-	 * processor occupant.
-	 */
-	Processor_client * const new_client = scheduler->update_occupant();
-	Processor_lazy_state * const new_state = new_client->lazy_state();
-	if (old_client != new_client) { reset_scheduling_time(processor_id); }
-	processor->prepare_proceeding(old_state, new_state);
-	new_client->proceed(processor_id);
+	processor->exception();
 }


@@ -44,16 +44,13 @@ void Kernel::Processor_client::_interrupt(unsigned const processor_id)
 	/* determine handling for specific interrupt */
 	unsigned irq_id;
 	Pic * const ic = pic();
-	if (ic->take_request(irq_id))
-	{
-		/* check wether the interrupt is a processor-scheduling timeout */
-		if (timer()->interrupt_id(processor_id) == irq_id) {
-
-			_processor->scheduler()->yield_occupation();
-			timer()->clear_interrupt(processor_id);
-
-		/* check wether the interrupt is our inter-processor interrupt */
-		} else if (ic->is_ip_interrupt(irq_id, processor_id)) {
-
-			_processor->ip_interrupt();
+	if (ic->take_request(irq_id)) {
+
+		/* check wether the interrupt is a processor-scheduling timeout */
+		if (!_processor->check_timer_interrupt(irq_id)) {
+
+			/* check wether the interrupt is our inter-processor interrupt */
+			if (ic->is_ip_interrupt(irq_id, processor_id)) {
+
+				_processor->ip_interrupt();
@@ -64,6 +61,7 @@ void Kernel::Processor_client::_interrupt(unsigned const processor_id)
 				Irq::occurred(irq_id);
 			}
 		}
+	}
 	/* end interrupt request at controller */
 	ic->finish_request();
 }


@@ -16,6 +16,7 @@
 #define _KERNEL__PROCESSOR_H_
 
 /* core includes */
+#include <timer.h>
 #include <processor_driver.h>
 #include <kernel/scheduler.h>
@@ -142,6 +143,16 @@ class Kernel::Processor : public Processor_driver
 		unsigned const       _id;
 		Processor_scheduler  _scheduler;
 		bool                 _ip_interrupt_pending;
+		Timer * const        _timer;
+
+		/**
+		 * Reset the scheduling timer for a new scheduling interval
+		 */
+		void _reset_timer()
+		{
+			unsigned const tics = _timer->ms_to_tics(USER_LAP_TIME_MS);
+			_timer->start_one_shot(tics, _id);
+		}
 
 	public:
@@ -150,12 +161,35 @@ class Kernel::Processor : public Processor_driver
 		 *
 		 * \param id           kernel name of the processor object
 		 * \param idle_client  client that gets scheduled on idle
+		 * \param timer        timer that is used for scheduling the processor
 		 */
-		Processor(unsigned const id, Processor_client * const idle_client)
+		Processor(unsigned const id, Processor_client * const idle_client,
+		          Timer * const timer)
 		:
-			_id(id), _scheduler(idle_client), _ip_interrupt_pending(false)
+			_id(id), _scheduler(idle_client), _ip_interrupt_pending(false),
+			_timer(timer)
 		{ }
 
+		/**
+		 * Initializate on the processor that this object corresponds to
+		 */
+		void init_processor_local() { _reset_timer(); }
+
+		/**
+		 * Check for a scheduling timeout and handle it in case
+		 *
+		 * \param interrupt_id  kernel name of interrupt that caused this call
+		 *
+		 * \return  wether it was a timeout and therefore has been handled
+		 */
+		bool check_timer_interrupt(unsigned const interrupt_id)
+		{
+			if (_timer->interrupt_id(_id) != interrupt_id) { return false; }
+			_scheduler.yield_occupation();
+			_timer->clear_interrupt(_id);
+			return true;
+		}
+
 		/**
 		 * Perform outstanding TLB maintainance work
 		 */
@@ -183,6 +217,33 @@ class Kernel::Processor : public Processor_driver
 		 */
 		void schedule(Processor_client * const client);
 
+		/**
+		 * Handle exception of the processor and proceed its user execution
+		 */
+		void exception()
+		{
+			/*
+			 * Request the current occupant without any update. While the
+			 * processor was outside the kernel, another processor may have changed the
+			 * scheduling of the local activities in a way that an update would return
+			 * an occupant other than that whose exception caused the kernel entry.
+			 */
+			Processor_client * const old_client = _scheduler.occupant();
+			Processor_lazy_state * const old_state = old_client->lazy_state();
+			old_client->exception(_id);
+
+			/*
+			 * The processor local as well as remote exception-handling may have
+			 * changed the scheduling of the local activities. Hence we must update the
+			 * occupant.
+			 */
+			bool update;
+			Processor_client * const new_client = _scheduler.update_occupant(update);
+			if (update) { _reset_timer(); }
+			Processor_lazy_state * const new_state = new_client->lazy_state();
+			prepare_proceeding(old_state, new_state);
+			new_client->proceed(_id);
+		}
+
 		/***************
 		 ** Accessors **


@@ -80,6 +80,7 @@ class Kernel::Processor_pool
 	{
 		private:
 
+			Timer _timer;
 			char  _processors[PROCESSORS][sizeof(Processor)];
 			char  _idle_threads[PROCESSORS][sizeof(Idle_thread)];
@@ -103,7 +104,7 @@ class Kernel::Processor_pool
 			{
 				for (unsigned i = 0; i < PROCESSORS; i++) {
 					new (_idle_threads[i]) Idle_thread(processor(i));
-					new (_processors[i]) Processor(i, _idle_thread(i));
+					new (_processors[i]) Processor(i, _idle_thread(i), &_timer);
 				}
 			}


@@ -232,11 +232,22 @@ class Kernel::Scheduler_item : public Double_list<T>::Item
 template <typename T>
 class Kernel::Scheduler
 {
-	protected:
+	private:
 
 		T * const       _idle;
 		T *             _occupant;
 		Double_list<T>  _items[Priority::MAX + 1];
+		bool            _yield;
+
+		bool _check_update(T * const occupant)
+		{
+			if (_yield) {
+				_yield = false;
+				return true;
+			}
+			if (_occupant != occupant) { return true; }
+			return false;
+		}
 
 	public:
@@ -252,12 +263,17 @@ class Kernel::Scheduler
 		 *
 		 * \return  updated occupant reference
 		 */
-		T * update_occupant()
+		T * update_occupant(bool & update)
 		{
 			for (int i = Priority::MAX; i >= 0 ; i--) {
-				_occupant = _items[i].head();
-				if (_occupant) { return _occupant; }
+				T * const head = _items[i].head();
+				if (!head) { continue; }
+				update = _check_update(head);
+				_occupant = head;
+				return head;
 			}
+			update = _check_update(_idle);
+			_occupant = 0;
 			return _idle;
 		}
@@ -266,6 +282,7 @@ class Kernel::Scheduler
 		 */
 		void yield_occupation()
 		{
+			_yield = true;
 			if (!_occupant) { return; }
 			_items[_occupant->priority()].head_to_tail();
 		}