/*
 * \brief  Interface between kernel and userland
 * \author Martin Stein
 * \date   2011-11-30
 */

/*
 * Copyright (C) 2011-2013 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU General Public License version 2.
 */

#ifndef _KERNEL__INTERFACE_H_
#define _KERNEL__INTERFACE_H_

/* Genode includes */
#include <kernel/interface_support.h>

namespace Genode
{
	class Native_utcb;
	class Platform_pd;
	class Tlb;
}

namespace Kernel
{
	typedef Genode::Tlb         Tlb;
	typedef Genode::addr_t      addr_t;
	typedef Genode::size_t      size_t;
	typedef Genode::Platform_pd Platform_pd;
	typedef Genode::Native_utcb Native_utcb;

	/**
	 * Kernel names of all kernel calls
	 */
	struct Call_id
	{
		enum {
			NEW_THREAD           = 0,
			DELETE_THREAD        = 1,
			START_THREAD         = 2,
			PAUSE_THREAD         = 3,
			RESUME_THREAD        = 4,
			CURRENT_THREAD_ID    = 5,
			YIELD_THREAD         = 6,
			ACCESS_THREAD_REGS   = 7,
			ROUTE_THREAD_EVENT   = 8,
			UPDATE_PD            = 9,
			UPDATE_REGION        = 10,
			NEW_PD               = 11,
			KILL_PD              = 12,
			REQUEST_AND_WAIT     = 13,
			REPLY                = 14,
			WAIT_FOR_REQUEST     = 15,
			NEW_SIGNAL_RECEIVER  = 16,
			NEW_SIGNAL_CONTEXT   = 17,
			KILL_SIGNAL_CONTEXT  = 18,
			KILL_SIGNAL_RECEIVER = 19,
			SUBMIT_SIGNAL        = 20,
			AWAIT_SIGNAL         = 21,
			SIGNAL_PENDING       = 22,
			ACK_SIGNAL           = 23,
			NEW_VM               = 24,
			RUN_VM               = 25,
			PAUSE_VM             = 26,
			PRINT_CHAR           = 27,
		};
	};


	/*****************************************************************
	 ** Kernel call with 1 to 6 arguments                           **
	 **                                                             **
	 ** These functions must not be inline to ensure that objects,  **
	 ** which are referenced by arguments, are tagged as "used"     **
	 ** even though only the pointer gets handled in here.          **
	 *****************************************************************/

	Call_ret call(Call_arg arg_0);

	Call_ret call(Call_arg arg_0,
	              Call_arg arg_1);

	Call_ret call(Call_arg arg_0,
	              Call_arg arg_1,
	              Call_arg arg_2);

	Call_ret call(Call_arg arg_0,
	              Call_arg arg_1,
	              Call_arg arg_2,
	              Call_arg arg_3);

	Call_ret call(Call_arg arg_0,
	              Call_arg arg_1,
	              Call_arg arg_2,
	              Call_arg arg_3,
	              Call_arg arg_4);

	Call_ret call(Call_arg arg_0,
	              Call_arg arg_1,
	              Call_arg arg_2,
	              Call_arg arg_3,
	              Call_arg arg_4,
	              Call_arg arg_5);


	/**
	 * Virtual range of the mode transition region in every PD
	 */
	addr_t mode_transition_virt_base();
	size_t mode_transition_size();


	/**
	 * Get sizes of the kernel objects
	 */
	size_t thread_size();
	size_t pd_size();
	size_t signal_context_size();
	size_t signal_receiver_size();
	size_t vm_size();


	/**
	 * Get alignment constraints of the kernel objects
	 */
	unsigned kernel_pd_alignm_log2();


	/**
	 * Create a protection domain
	 *
	 * \param dst  appropriate memory donation for the kernel object
	 * \param pd   core-local Platform_pd object
	 *
	 * \retval >0  kernel name of the new protection domain
	 * \retval  0  failed
	 *
	 * Restricted to core threads. Regaining the supplied memory is not
	 * supported yet.
	 */
	inline unsigned new_pd(void * const dst, Platform_pd * const pd)
	{
		return call(Call_id::NEW_PD, (Call_arg)dst, (Call_arg)pd);
	}
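
	/*
	 * Usage sketch (illustration only, not part of the interface): create a
	 * protection domain from core. 'donation' and 'platform_pd' are
	 * hypothetical; the donation is assumed to span at least pd_size()
	 * bytes and to respect kernel_pd_alignm_log2().
	 *
	 *   void * const donation = ...;            // memory handed to the kernel
	 *   Platform_pd * const platform_pd = ...;  // core-local meta data
	 *   unsigned const pd_id = new_pd(donation, platform_pd);
	 *   if (!pd_id) { ... handle allocation failure ... }
	 */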


	/**
	 * Destruct a protection domain
	 *
	 * \param pd_id  kernel name of the targeted protection domain
	 *
	 * \retval  0  succeeded
	 * \retval -1  failed
	 */
	inline int kill_pd(unsigned const pd_id)
	{
		return call(Call_id::KILL_PD, pd_id);
	}


	/**
	 * Propagate changes in PD configuration
	 *
	 * \param pd_id  ID of the PD that has been configured
	 *
	 * The kernel and/or the hardware may cache parts of a PD configuration,
	 * such as virtual address translations. This function ensures that the
	 * current configuration of the targeted PD is fully applied from the
	 * moment it returns to the userland. This function is inappropriate in
	 * case that a PD wants to change its own configuration. There's no need
	 * for this function after a configuration change that can't affect the
	 * kernel- and/or hardware-caches.
	 *
	 * Restricted to core threads.
	 */
	inline void update_pd(unsigned const pd_id)
	{
		call(Call_id::UPDATE_PD, pd_id);
	}


	/**
	 * Propagate memory updates within a given virtual region
	 *
	 * \param base  virtual base of the region
	 * \param size  size of the region
	 *
	 * If one updates a memory region and must ensure that the update
	 * becomes visible directly to other address spaces, this function
	 * does the job.
	 *
	 * Restricted to core threads.
	 */
	inline void update_region(addr_t const base, size_t const size)
	{
		call(Call_id::UPDATE_REGION, (Call_arg)base, (Call_arg)size);
	}
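
	/*
	 * Usage sketch (illustration only): after core writes data into memory
	 * that is also mapped in another address space, propagate the update so
	 * the other PD observes it immediately. 'dst', 'src', and 'bytes' are
	 * hypothetical.
	 *
	 *   memcpy(dst, src, bytes);              // core-local write
	 *   update_region((addr_t)dst, bytes);    // make it visible elsewhere
	 */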


	/**
	 * Create a kernel object that acts as a thread that isn't executed initially
	 *
	 * \param p         memory donation for the new kernel thread object
	 * \param priority  scheduling priority of the new thread
	 * \param label     debugging label of the new thread
	 *
	 * \retval >0  kernel name of the new thread
	 * \retval  0  failed
	 *
	 * Restricted to core threads.
	 */
	inline int new_thread(void * const p, unsigned const priority,
	                      char const * const label)
	{
		return call((Call_arg)Call_id::NEW_THREAD, (Call_arg)p, (Call_arg)priority,
		            (Call_arg)label);
	}


	/**
	 * Delete an existing thread
	 *
	 * \param thread_id  kernel name of the targeted thread
	 *
	 * Restricted to core threads. After calling this, the memory that was
	 * granted beforehand by 'new_thread' to the kernel for managing this
	 * thread is freed again.
	 */
	inline void delete_thread(unsigned const thread_id)
	{
		call(Call_id::DELETE_THREAD, thread_id);
	}


	/**
	 * Start executing a thread
	 *
	 * \param thread_id  kernel name of the targeted thread
	 * \param cpu_id     kernel name of the targeted processor
	 * \param pd_id      kernel name of the targeted protection domain
	 * \param utcb       core-local pointer to the userland thread-context
	 *
	 * Restricted to core threads.
	 */
	inline Tlb * start_thread(unsigned const thread_id, unsigned const cpu_id,
	                          unsigned const pd_id, Native_utcb * const utcb)
	{
		return (Tlb *)call(Call_id::START_THREAD, thread_id, cpu_id, pd_id,
		                   (Call_arg)utcb);
	}
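
	/*
	 * Usage sketch (illustration only): create and start a thread from core.
	 * 'donation', 'PRIORITY', 'pd_id', 'cpu_id', and 'utcb' are hypothetical
	 * placeholders; the donation is assumed to span at least thread_size()
	 * bytes.
	 *
	 *   void * const donation = ...;                    // >= thread_size()
	 *   unsigned const thread_id =
	 *       new_thread(donation, PRIORITY, "worker");   // not executed yet
	 *   Native_utcb * const utcb = ...;                 // core-local UTCB pointer
	 *   start_thread(thread_id, cpu_id, pd_id, utcb);   // bind to PD and CPU
	 */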


	/**
	 * Prevent thread from participating in CPU scheduling
	 *
	 * \param id  ID of the targeted thread. If not set, this will target
	 *            the current thread.
	 *
	 * \retval  0  succeeded
	 * \retval -1  the targeted thread does not exist or is still active
	 *
	 * If the caller doesn't target itself, this is restricted to core threads.
	 */
	inline int pause_thread(unsigned const id = 0)
	{
		return call(Call_id::PAUSE_THREAD, id);
	}


	/**
	 * Let an already started thread participate in CPU scheduling
	 *
	 * \param id  ID of the targeted thread
	 *
	 * \retval  0  succeeded and thread was paused beforehand
	 * \retval  1  succeeded and thread was active beforehand
	 * \retval -1  failed
	 *
	 * If the targeted thread blocks for any event except a 'start_thread'
	 * call, this call cancels the blocking.
	 */
	inline int resume_thread(unsigned const id = 0)
	{
		return call(Call_id::RESUME_THREAD, id);
	}


	/**
	 * Let the current thread give up its remaining timeslice
	 *
	 * \param id  if this thread ID is set and valid, this will additionally
	 *            resume the targeted thread
	 */
	inline void yield_thread(unsigned const id = 0)
	{
		call(Call_id::YIELD_THREAD, id);
	}


	/**
	 * Get the thread ID of the current thread
	 */
	inline int current_thread_id()
	{
		return call(Call_id::CURRENT_THREAD_ID);
	}


	/**
	 * Set or unset the handler of an event that a kernel thread-object triggers
	 *
	 * \param thread_id          kernel name of the targeted thread
	 * \param event_id           kernel name of the targeted thread event
	 * \param signal_context_id  kernel name of the handler's signal context
	 *
	 * Restricted to core threads.
	 */
	inline int route_thread_event(unsigned const thread_id,
	                              unsigned const event_id,
	                              unsigned const signal_context_id)
	{
		return call(Call_id::ROUTE_THREAD_EVENT, thread_id,
		            event_id, signal_context_id);
	}


	/**
	 * Send an IPC request and await the corresponding IPC reply
	 *
	 * \param id  kernel name of the server thread
	 *
	 * As soon as the call returns, the caller's UTCB provides the received
	 * message.
	 */
	inline void request_and_wait(unsigned const id)
	{
		call(Call_id::REQUEST_AND_WAIT, id);
	}


	/**
	 * Await the receipt of a message
	 *
	 * As soon as the call returns, the caller's UTCB provides the received
	 * message, including its type.
	 */
	inline void wait_for_request()
	{
		call(Call_id::WAIT_FOR_REQUEST);
	}


	/**
	 * Reply to the last received message
	 *
	 * \param await_message  whether the call shall await receipt of a message
	 *
	 * If 'await_message' is 1, the caller's UTCB provides the received
	 * message as soon as the call returns.
	 */
	inline void reply(bool const await_message)
	{
		call(Call_id::REPLY, await_message);
	}
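
	/*
	 * Usage sketch (illustration only): a minimal server loop and the
	 * matching client call. Message payloads are exchanged through the
	 * caller's UTCB, so marshalling is omitted. 'server_thread_id' is a
	 * hypothetical placeholder.
	 *
	 *   server:  wait_for_request();              // block for first request
	 *            for (;;) {
	 *                ... read request from UTCB, write reply into UTCB ...
	 *                reply(true);                 // send reply, await next request
	 *            }
	 *
	 *   client:  ... write request into UTCB ...
	 *            request_and_wait(server_thread_id);
	 *            ... read reply from UTCB ...
	 */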


	/**
	 * Print a char 'c' to the kernel's serial output
	 */
	inline void print_char(char const c)
	{
		call(Call_id::PRINT_CHAR, c);
	}
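
	/*
	 * Usage sketch (illustration only): print a null-terminated string
	 * through the kernel's serial output, one character at a time.
	 *
	 *   for (char const * s = "hello kernel\n"; *s; s++)
	 *       print_char(*s);
	 */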


	/**
	 * Access plain member variables of a kernel thread-object
	 *
	 * \param thread_id     kernel name of the targeted thread
	 * \param reads         number of read operations
	 * \param writes        number of write operations
	 * \param read_values   base of value buffer for read operations
	 * \param write_values  base of value buffer for write operations
	 *
	 * \retval  0  all operations done
	 * \retval >0  number of operations left undone
	 * \retval -1  failed to start processing operations
	 *
	 * Restricted to core threads. Operations are processed in the order in
	 * which the register names appear in the caller's UTCB. If reads = 0,
	 * read_values is of no relevance. If writes = 0, write_values is of no
	 * relevance.
	 *
	 * Expected structure at the caller's UTCB base:
	 *
	 *   0 * sizeof(addr_t):                    read register name #1
	 *   ...                                    ...
	 *   (reads - 1) * sizeof(addr_t):          read register name #reads
	 *   (reads - 0) * sizeof(addr_t):          write register name #1
	 *   ...                                    ...
	 *   (reads + writes - 1) * sizeof(addr_t): write register name #writes
	 *
	 * Expected structure at write_values:
	 *
	 *   0 * sizeof(addr_t):            write value #1
	 *   ...                            ...
	 *   (writes - 1) * sizeof(addr_t): write value #writes
	 */
	inline int access_thread_regs(unsigned const thread_id,
	                              unsigned const reads,
	                              unsigned const writes,
	                              addr_t * const read_values,
	                              addr_t * const write_values)
	{
		return call(Call_id::ACCESS_THREAD_REGS, thread_id, reads, writes,
		            (Call_arg)read_values, (Call_arg)write_values);
	}
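
	/*
	 * Usage sketch (illustration only): read two registers of a thread from
	 * core. 'utcb_base', 'REG_IP', and 'REG_SP' are hypothetical placeholders
	 * for the caller's UTCB base and for register names known to the kernel.
	 *
	 *   addr_t * const names = (addr_t *)utcb_base;
	 *   names[0] = REG_IP;                    // first register to read
	 *   names[1] = REG_SP;                    // second register to read
	 *   addr_t values[2];
	 *   int const left = access_thread_regs(thread_id, 2, 0, values, 0);
	 *   // on left == 0, values[0] and values[1] hold the register contents
	 */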


	/**
	 * Create a kernel object that acts as a signal receiver
	 *
	 * \param p  memory donation for the kernel signal-receiver object
	 *
	 * \retval >0  kernel name of the new signal receiver
	 * \retval  0  failed
	 *
	 * Restricted to core threads.
	 */
	inline unsigned new_signal_receiver(addr_t const p)
	{
		return call(Call_id::NEW_SIGNAL_RECEIVER, p);
	}


	/**
	 * Create a kernel object that acts as a signal context and assign it
	 * to a signal receiver
	 *
	 * \param p         memory donation for the kernel signal-context object
	 * \param receiver  kernel name of the targeted signal receiver
	 * \param imprint   user label of the signal context
	 *
	 * \retval >0  kernel name of the new signal context
	 * \retval  0  failed
	 *
	 * Restricted to core threads.
	 */
	inline unsigned new_signal_context(addr_t const p,
	                                   unsigned const receiver,
	                                   unsigned const imprint)
	{
		return call(Call_id::NEW_SIGNAL_CONTEXT, p, receiver, imprint);
	}


	/**
	 * Await any context of a receiver and optionally acknowledge a context
	 * beforehand
	 *
	 * \param receiver_id  kernel name of the targeted signal receiver
	 * \param context_id   kernel name of a context that shall be acknowledged
	 *
	 * \retval  0  succeeded
	 * \retval -1  failed
	 *
	 * If context_id is set to 0, the call doesn't acknowledge any context.
	 * If this call returns 0, an instance of 'Signal::Data' is located at
	 * the base of the caller's UTCB. Every occurrence of a signal is
	 * provided through this function until it gets delivered through this
	 * function or until the context, respectively the receiver, gets
	 * destructed. If multiple threads listen at the same receiver, and/or
	 * multiple contexts of the receiver trigger simultaneously, there is no
	 * guarantee as to which thread receives, and from which context. A
	 * context that delivered once doesn't deliver again unless its last
	 * delivery has been acknowledged via ack_signal.
	 */
	inline int await_signal(unsigned const receiver_id,
	                        unsigned const context_id)
	{
		return call(Call_id::AWAIT_SIGNAL, receiver_id, context_id);
	}
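
	/*
	 * Usage sketch (illustration only): core-side signal dispatch loop with a
	 * single context. 'rcv_buf', 'ctx_buf', 'IMPRINT', and 'utcb_base' are
	 * hypothetical; the buffers are assumed to span signal_receiver_size()
	 * respectively signal_context_size() bytes.
	 *
	 *   unsigned const receiver = new_signal_receiver(rcv_buf);
	 *   unsigned const context  = new_signal_context(ctx_buf, receiver, IMPRINT);
	 *   for (;;) {
	 *       if (await_signal(receiver, 0)) continue;   // 0: ack nothing
	 *       ... evaluate 'Signal::Data' at utcb_base ...
	 *       ack_signal(context);                       // allow the next delivery
	 *   }
	 */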


	/**
	 * Return whether any context of a receiver is pending
	 *
	 * \param receiver  kernel name of the targeted signal receiver
	 *
	 * \retval 0  none of the contexts is pending or the receiver doesn't exist
	 * \retval 1  a context of the signal receiver is pending
	 */
	inline bool signal_pending(unsigned const receiver)
	{
		return call(Call_id::SIGNAL_PENDING, receiver);
	}


	/**
	 * Trigger a specific signal context
	 *
	 * \param context  kernel name of the targeted signal context
	 * \param num      how often the context shall be triggered by this call
	 *
	 * \retval  0  succeeded
	 * \retval -1  failed
	 */
	inline int submit_signal(unsigned const context, unsigned const num)
	{
		return call(Call_id::SUBMIT_SIGNAL, context, num);
	}


	/**
	 * Acknowledge the processing of the last delivery of a signal context
	 *
	 * \param context  kernel name of the targeted signal context
	 */
	inline void ack_signal(unsigned const context)
	{
		call(Call_id::ACK_SIGNAL, context);
	}


	/**
	 * Destruct a signal context
	 *
	 * \param context  kernel name of the targeted signal context
	 *
	 * \retval  0  succeeded
	 * \retval -1  failed
	 *
	 * Restricted to core threads.
	 */
	inline int kill_signal_context(unsigned const context)
	{
		return call(Call_id::KILL_SIGNAL_CONTEXT, context);
	}


	/**
	 * Destruct a signal receiver
	 *
	 * \param receiver  kernel name of the targeted signal receiver
	 *
	 * \retval  0  succeeded
	 * \retval -1  failed
	 *
	 * Restricted to core threads.
	 */
	inline int kill_signal_receiver(unsigned const receiver)
	{
		return call(Call_id::KILL_SIGNAL_RECEIVER, receiver);
	}


	/**
	 * Create a virtual machine that is stopped initially
	 *
	 * \param dst                memory donation for the kernel VM-object
	 * \param state              location of the CPU state of the VM
	 * \param signal_context_id  kernel name of the signal context for VM events
	 *
	 * \retval >0  kernel name of the new VM
	 * \retval  0  failed
	 *
	 * Restricted to core threads. Regaining the supplied memory is not
	 * supported yet.
	 */
	inline int new_vm(void * const dst, void * const state,
	                  unsigned const signal_context_id)
	{
		return call(Call_id::NEW_VM, (Call_arg)dst, (Call_arg)state,
		            signal_context_id);
	}


	/**
	 * Execute a virtual machine (again)
	 *
	 * \param vm_id  kernel name of the targeted VM
	 *
	 * Restricted to core threads.
	 */
	inline void run_vm(unsigned const vm_id)
	{
		call(Call_id::RUN_VM, vm_id);
	}


	/**
	 * Stop execution of a virtual machine
	 *
	 * \param vm_id  kernel name of the targeted VM
	 *
	 * Restricted to core threads.
	 */
	inline void pause_vm(unsigned const vm_id)
	{
		call(Call_id::PAUSE_VM, vm_id);
	}
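
	/*
	 * Usage sketch (illustration only): set up and run a VM from core.
	 * 'vm_buf', 'cpu_state', and 'vm_events' are hypothetical: a donation of
	 * at least vm_size() bytes, a buffer holding the virtual CPU state, and
	 * the kernel name of a signal context that receives VM events.
	 *
	 *   unsigned const vm_id = new_vm(vm_buf, cpu_state, vm_events);
	 *   run_vm(vm_id);     // resume guest execution
	 *   ... upon a VM-event signal, inspect or patch cpu_state ...
	 *   run_vm(vm_id);     // continue guest execution
	 */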
}

#endif /* _KERNEL__INTERFACE_H_ */