init: support dynamic cap-quota adjustment

This patch makes init's dynamic quota balancing mechanism available for
capability quotas.

Fixes #2852
This commit is contained in:
Norman Feske 2018-06-04 15:49:29 +02:00 committed by Christian Helmuth
parent 639c838707
commit 950b270e74
6 changed files with 550 additions and 106 deletions

View File

@ -852,6 +852,331 @@ append config {
</node> </node>
</node> </node>
</expect_init_state> </expect_init_state>
<init_config>
<parent-provides>
<service name="ROM"/>
<service name="CPU"/>
<service name="PD"/>
<service name="LOG"/>
</parent-provides>
</init_config>
<sleep ms="150"/>
<message string="test capability-quota adjustments"/>
<init_config>
<report child_caps="yes"/>
<parent-provides>
<service name="ROM"/>
<service name="CPU"/>
<service name="PD"/>
<service name="LOG"/>
</parent-provides>
<start name="test" caps="100">
<binary name="dummy"/>
<resource name="RAM" quantum="1M"/>
<config version="initial" />
<route> <any-service> <parent/> </any-service> </route>
</start>
</init_config>
<expect_log string="[init -> test] config 1: initial"/>
<sleep ms="150"/>
<expect_init_state>
<node name="child"> <attribute name="name" value="test"/>
<node name="caps">
<attribute name="assigned" value="100"/>
<attribute name="quota" lower="100"/>
</node>
</node>
</expect_init_state>
<!-- increase capability quota of child by 300 -->
<init_config>
<report init_caps="yes" child_caps="yes"/>
<parent-provides>
<service name="ROM"/>
<service name="CPU"/>
<service name="PD"/>
<service name="LOG"/>
</parent-provides>
<start name="test" caps="400">
<binary name="dummy"/>
<resource name="RAM" quantum="1M"/>
<config version="upgraded" />
<route> <any-service> <parent/> </any-service> </route>
</start>
</init_config>
<expect_log string="[init -> test] config 2: upgraded"/>
<sleep ms="150"/>
<expect_init_state>
<node name="child"> <attribute name="name" value="test"/>
<node name="caps">
<attribute name="assigned" value="400"/>
<attribute name="quota" higher="300"/>
</node>
</node>
</expect_init_state>
<!-- start second child consuming all slack capabilities -->
<init_config>
<report init_caps="yes" child_caps="yes"/>
<parent-provides>
<service name="ROM"/>
<service name="CPU"/>
<service name="PD"/>
<service name="LOG"/>
</parent-provides>
<start name="test" caps="400">
<binary name="dummy"/>
<resource name="RAM" quantum="4M"/>
<config/>
<route> <any-service> <parent/> </any-service> </route>
</start>
<start name="greedy" caps="1000000">
<binary name="dummy"/>
<resource name="RAM" quantum="4M"/>
<config version="started"/>
<route> <any-service> <parent/> </any-service> </route>
</start>
</init_config>
<expect_log string="[init -> greedy] config 1: started"/>
<sleep ms="150"/>
<expect_init_state>
<node name="caps"> <attribute name="avail" lower="1000"/> </node>
</expect_init_state>
<!-- attempt to upgrade the 'test' child to 800 caps, hitting the limit -->
<init_config>
<report init_caps="yes" child_caps="yes"/>
<parent-provides>
<service name="ROM"/>
<service name="CPU"/>
<service name="PD"/>
<service name="LOG"/>
</parent-provides>
<start name="test" caps="800">
<binary name="dummy"/>
<resource name="RAM" quantum="4M"/>
<config version="upgrade impossible"/>
<route> <any-service> <parent/> </any-service> </route>
</start>
<start name="greedy" caps="1000000">
<binary name="dummy"/>
<resource name="RAM" quantum="4M"/>
<config version="started"/>
<route> <any-service> <parent/> </any-service> </route>
</start>
</init_config>
<expect_log string="[init -> test] config 4: upgrade impossible"/>
<sleep ms="150"/>
<expect_init_state>
<node name="child"> <attribute name="name" value="test"/>
<node name="caps">
<attribute name="assigned" lower="800"/>
</node>
</node>
</expect_init_state>
<!-- kill greedy child, now the upgrade of 'test' can be completed -->
<init_config>
<report init_caps="yes" child_caps="yes"/>
<parent-provides>
<service name="ROM"/>
<service name="CPU"/>
<service name="PD"/>
<service name="LOG"/>
</parent-provides>
<default caps="100"/>
<start name="test" caps="800">
<binary name="dummy"/>
<resource name="RAM" quantum="4M"/>
<config version="upgraded to 800 caps"/>
<route> <any-service> <parent/> </any-service> </route>
</start>
</init_config>
<expect_log string="[init -> test] config 5: upgraded to 800 caps"/>
<sleep ms="150"/>
<expect_init_state>
<node name="child"> <attribute name="name" value="test"/>
<node name="caps">
<attribute name="assigned" value="800"/>
<attribute name="quota" higher="700"/>
</node>
</node>
</expect_init_state>
<!-- reduce quota -->
<init_config>
<report init_caps="yes" child_caps="yes"/>
<parent-provides>
<service name="ROM"/>
<service name="CPU"/>
<service name="PD"/>
<service name="LOG"/>
</parent-provides>
<start name="test" caps="200">
<binary name="dummy"/>
<resource name="RAM" quantum="4M"/>
<config version="downgraded to 200 caps"/>
<route> <any-service> <parent/> </any-service> </route>
</start>
</init_config>
<expect_log string="[init -> test] config 6: downgraded to 200 caps"/>
<sleep ms="150"/>
<expect_init_state>
<node name="child"> <attribute name="name" value="test"/>
<node name="caps">
<attribute name="assigned" value="200"/>
<attribute name="quota" lower="200"/>
</node>
</node>
</expect_init_state>
<!-- let child consume quota -->
<init_config version="consumed">
<parent-provides>
<service name="ROM"/>
<service name="CPU"/>
<service name="PD"/>
<service name="LOG"/>
</parent-provides>
<start name="test" caps="200">
<binary name="dummy"/>
<resource name="RAM" quantum="4M"/>
<config version="consume 100 caps">
<handle_yield_requests/>
<consume_caps amount="100"/>
</config>
<route> <any-service> <parent/> </any-service> </route>
</start>
</init_config>
<expect_log string="[init -> test] config 7: consume 100 caps"/>
<expect_log string="[init -> test] consume 100 caps"/>
<sleep ms="1000"/>
<!-- activate child_caps report -->
<init_config version="report consumed">
<report child_caps="yes"/>
<parent-provides>
<service name="ROM"/>
<service name="CPU"/>
<service name="PD"/>
<service name="LOG"/>
</parent-provides>
<start name="test" caps="200">
<binary name="dummy"/>
<resource name="RAM" quantum="4M"/>
<config version="consume 100 caps">
<handle_yield_requests/>
<consume_caps amount="100"/>
</config>
<route> <any-service> <parent/> </any-service> </route>
</start>
</init_config>
<sleep ms="200"/>
<expect_init_state>
<node name="child"> <attribute name="name" value="test"/>
<node name="caps">
<attribute name="avail" lower="100"/>
</node>
</node>
</expect_init_state>
<!-- reduce child quota by 100 caps, triggering a resource-yield request -->
<init_config>
<report init_caps="yes" child_caps="yes"/>
<parent-provides>
<service name="ROM"/>
<service name="CPU"/>
<service name="PD"/>
<service name="LOG"/>
</parent-provides>
<start name="test" caps="100">
<binary name="dummy"/>
<resource name="RAM" quantum="4M"/>
<config version="consume 100 caps">
<handle_yield_requests/>
<consume_caps amount="100"/>
</config>
<route> <any-service> <parent/> </any-service> </route>
</start>
</init_config>
<expect_log string="[init -> test] got yield request"/>
<expect_log string="[init -> test] release 100 caps"/>
<sleep ms="500"/>
<expect_init_state>
<node name="child"> <attribute name="name" value="test"/>
<node name="caps"> <attribute name="quota" lower="100"/> </node>
</node>
</expect_init_state>
<!-- let child issue a resource request -->
<init_config version="resources requested">
<report init_caps="yes" child_caps="yes" delay_ms="2000"/>
<parent-provides>
<service name="ROM"/>
<service name="CPU"/>
<service name="PD"/>
<service name="LOG"/>
</parent-provides>
<start name="test" caps="100">
<binary name="dummy"/>
<resource name="RAM" quantum="4M"/>
<config version="request more quota than avail">
<consume_caps amount="100"/>
</config>
<route> <any-service> <parent/> </any-service> </route>
</start>
</init_config>
<expect_log string="[init -> test] config 8: request more quota than avail"/>
<sleep ms="2000"/>
<expect_init_state>
<node name="child"> <attribute name="name" value="test"/>
<node name="caps">
<attribute name="assigned" value="100"/>
<attribute name="requested" higher="1"/>
</node>
</node>
</expect_init_state>
<!-- respond to resource request -->
<init_config>
<report init_caps="yes" child_caps="yes"/>
<parent-provides>
<service name="ROM"/>
<service name="CPU"/>
<service name="PD"/>
<service name="LOG"/>
</parent-provides>
<start name="test" caps="200">
<binary name="dummy"/>
<resource name="RAM" quantum="4M"/>
<config version="request more quota than avail">
<consume_caps amount="100"/>
</config>
<route> <any-service> <parent/> </any-service> </route>
</start>
</init_config>
<sleep ms="150"/>
<expect_init_state>
<node name="child"> <attribute name="name" value="test"/>
<node name="caps">
<attribute name="assigned" value="200"/>
<attribute name="quota" higher="100"/>
</node>
</node>
</expect_init_state>
<init_config>
<parent-provides>
<service name="ROM"/>
<service name="CPU"/>
<service name="PD"/>
<service name="LOG"/>
</parent-provides>
</init_config>
<sleep ms="150"/>
<message string="forward session request to children"/> <message string="forward session request to children"/>
@ -1011,11 +1336,11 @@ append config {
<expect_init_state> <expect_init_state>
<node name="child"> <node name="child">
<attribute name="name" value="server"/> <attribute name="name" value="server"/>
<attribute name="id" value="21"/> <attribute name="id" value="23"/>
</node> </node>
<node name="child"> <node name="child">
<attribute name="name" value="client"/> <attribute name="name" value="client"/>
<attribute name="id" value="22"/> <attribute name="id" value="24"/>
</node> </node>
</expect_init_state> </expect_init_state>
<sleep ms="150"/> <sleep ms="150"/>
@ -1046,7 +1371,7 @@ append config {
<expect_init_state> <expect_init_state>
<node name="child"> <node name="child">
<attribute name="name" value="client"/> <attribute name="name" value="client"/>
<attribute name="id" value="23"/> <attribute name="id" value="25"/>
</node> </node>
</expect_init_state> </expect_init_state>
<sleep ms="150"/> <sleep ms="150"/>
@ -1109,6 +1434,7 @@ append config {
<expect_log string="[init -> server] [dummy] started version 2"/> <expect_log string="[init -> server] [dummy] started version 2"/>
<sleep ms="150"/> <sleep ms="150"/>
<message string="test complete"/> <message string="test complete"/>
</config> </config>
@ -1152,5 +1478,5 @@ build_boot_image $boot_modules
append qemu_args " -nographic " append qemu_args " -nographic "
run_genode_until {.*child "test-init" exited with exit value 0.*} 170 run_genode_until {.*child "test-init" exited with exit value 0.*} 200

View File

@ -25,6 +25,7 @@ namespace Dummy {
struct Log_service; struct Log_service;
struct Log_connections; struct Log_connections;
struct Ram_consumer; struct Ram_consumer;
struct Cap_consumer;
struct Resource_yield_handler; struct Resource_yield_handler;
struct Main; struct Main;
using namespace Genode; using namespace Genode;
@ -188,24 +189,79 @@ struct Dummy::Ram_consumer
}; };
/*
 * Helper that consumes a requested number of capability slots by creating
 * RPC objects (managing an object at the entrypoint presumably allocates
 * one capability — TODO confirm against the base API) and releases them
 * again on demand. Used by the dummy test component to exercise init's
 * dynamic cap-quota balancing.
 */
struct Dummy::Cap_consumer
{
Entrypoint &_ep;

/* number of caps currently accounted as consumed (0 if none) */
size_t _amount = 0;

/* minimal RPC interface; objects exist only to occupy capabilities */
struct Interface : Genode::Interface { GENODE_RPC_INTERFACE(); };

struct Object : Genode::Rpc_object<Interface>
{
Entrypoint &_ep;

/* associating the object with the entrypoint allocates its cap */
Object(Entrypoint &ep) : _ep(ep) { _ep.manage(*this); }

~Object() { _ep.dissolve(*this); }
};

/*
 * Statically allocate an array of RPC objects to avoid RAM allocations
 * as side effect during the cap-consume step.
 */
static constexpr size_t MAX = 200;

Constructible<Object> _objects[MAX];

Cap_consumer(Entrypoint &ep) : _ep(ep) { }

/* destroy all consumed RPC objects, freeing their capabilities */
void release()
{
if (!_amount)
return;

log("release ", _amount, " caps");

for (unsigned i = 0; i < MAX; i++)
_objects[i].destruct();

_amount = 0;
}

/*
 * Consume 'amount' capabilities, releasing any previously consumed set
 * first. Note: construction is clamped to MAX while '_amount' records
 * the requested value — presumably intentional for the test; confirm.
 */
void consume(size_t amount)
{
if (_amount)
release();

log("consume ", amount, " caps");

for (unsigned i = 0; i < min(amount, MAX); i++)
_objects[i].construct(_ep);

_amount = amount;
}
};
struct Dummy::Resource_yield_handler struct Dummy::Resource_yield_handler
{ {
Env &_env; Env &_env;
Ram_consumer &_ram_consumer; Ram_consumer &_ram_consumer;
Cap_consumer &_cap_consumer;
void _handle_yield() void _handle_yield()
{ {
log("got yield request"); log("got yield request");
_ram_consumer.release(); _ram_consumer.release();
_cap_consumer.release();
_env.parent().yield_response(); _env.parent().yield_response();
} }
Signal_handler<Resource_yield_handler> _yield_handler { Signal_handler<Resource_yield_handler> _yield_handler {
_env.ep(), *this, &Resource_yield_handler::_handle_yield }; _env.ep(), *this, &Resource_yield_handler::_handle_yield };
Resource_yield_handler(Env &env, Ram_consumer &ram_consumer) Resource_yield_handler(Env &env,
: _env(env), _ram_consumer(ram_consumer) Ram_consumer &ram_consumer, Cap_consumer &cap_consumer)
:
_env(env), _ram_consumer(ram_consumer), _cap_consumer(cap_consumer)
{ {
_env.parent().yield_sigh(_yield_handler); _env.parent().yield_sigh(_yield_handler);
} }
@ -229,6 +285,7 @@ struct Dummy::Main
Signal_handler<Main> _config_handler { _env.ep(), *this, &Main::_handle_config }; Signal_handler<Main> _config_handler { _env.ep(), *this, &Main::_handle_config };
Ram_consumer _ram_consumer { _env.ram() }; Ram_consumer _ram_consumer { _env.ram() };
Cap_consumer _cap_consumer { _env.ep() };
Constructible<Resource_yield_handler> _resource_yield_handler { }; Constructible<Resource_yield_handler> _resource_yield_handler { };
@ -260,8 +317,11 @@ struct Dummy::Main
if (node.type() == "consume_ram") if (node.type() == "consume_ram")
_ram_consumer.consume(node.attribute_value("amount", Number_of_bytes())); _ram_consumer.consume(node.attribute_value("amount", Number_of_bytes()));
if (node.type() == "consume_caps")
_cap_consumer.consume(node.attribute_value("amount", 0UL));
if (node.type() == "handle_yield_requests") if (node.type() == "handle_yield_requests")
_resource_yield_handler.construct(_env, _ram_consumer); _resource_yield_handler.construct(_env, _ram_consumer, _cap_consumer);
if (node.type() == "sleep") { if (node.type() == "sleep") {

View File

@ -163,13 +163,11 @@ Init::Child::apply_config(Xml_node start_node)
} }
static Init::Ram_quota assigned_ram_from_start_node(Genode::Xml_node start_node) Init::Ram_quota Init::Child::_configured_ram_quota() const
{ {
using namespace Init;
size_t assigned = 0; size_t assigned = 0;
start_node.for_each_sub_node("resource", [&] (Xml_node resource) { _start_node->xml().for_each_sub_node("resource", [&] (Xml_node resource) {
if (resource.attribute_value("name", String<8>()) == "RAM") if (resource.attribute_value("name", String<8>()) == "RAM")
assigned = resource.attribute_value("quantum", Number_of_bytes()); }); assigned = resource.attribute_value("quantum", Number_of_bytes()); });
@ -177,38 +175,48 @@ static Init::Ram_quota assigned_ram_from_start_node(Genode::Xml_node start_node)
} }
void Init::Child::apply_ram_upgrade() Init::Cap_quota Init::Child::_configured_cap_quota() const
{ {
Ram_quota const assigned_ram_quota = assigned_ram_from_start_node(_start_node->xml()); size_t const default_caps = _default_caps_accessor.default_caps().value;
if (assigned_ram_quota.value <= _resources.assigned_ram_quota.value) return Cap_quota { _start_node->xml().attribute_value("caps", default_caps) };
}
template <typename QUOTA, typename LIMIT_ACCESSOR>
void Init::Child::_apply_resource_upgrade(QUOTA &assigned, QUOTA const configured,
LIMIT_ACCESSOR const &limit_accessor)
{
if (configured.value <= assigned.value)
return; return;
size_t const increase = assigned_ram_quota.value QUOTA const limit = limit_accessor.resource_limit(QUOTA{});
- _resources.assigned_ram_quota.value; size_t const increment = configured.value - assigned.value;
size_t const limit = _ram_limit_accessor.ram_limit().value;
size_t const transfer = min(increase, limit);
if (increase > limit)
warning(name(), ": assigned RAM exceeds available RAM");
/* /*
* Remember assignment and apply RAM upgrade to child * If the configured quota exceeds our own quota, we donate all remaining
* * quota to the child.
* Note that we remember the actually transferred amount as the
* assigned amount. In the case where the value is clamped to to
* the limit, the value as given in the config remains diverged
* from the assigned value. This way, a future config update will
* attempt the completion of the upgrade if memory become
* available.
*/ */
if (transfer) { if (increment > limit.value)
_resources.assigned_ram_quota = if (_verbose.enabled())
Ram_quota { _resources.assigned_ram_quota.value + transfer }; warn_insuff_quota(limit.value);
_check_ram_constraints(_ram_limit_accessor.ram_limit()); QUOTA const transfer { min(increment, limit.value) };
ref_pd().transfer_quota(_child.pd_session_cap(), Ram_quota{transfer}); /*
* Remember assignment and apply upgrade to child
*
* Note that we remember the actually transferred amount as the assigned
* amount. In the case where the value is clamped to the limit, the
* value as given in the config remains diverged from the assigned value.
* This way, a future config update will attempt the completion of the
* upgrade if memory becomes available.
*/
if (transfer.value) {
assigned.value += transfer.value;
ref_pd().transfer_quota(_child.pd_session_cap(), transfer);
/* wake up child that blocks on a resource request */ /* wake up child that blocks on a resource request */
if (_requested_resources.constructed()) { if (_requested_resources.constructed()) {
@ -219,57 +227,91 @@ void Init::Child::apply_ram_upgrade()
} }
void Init::Child::apply_ram_downgrade() void Init::Child::apply_upgrade()
{ {
Ram_quota const assigned_ram_quota = assigned_ram_from_start_node(_start_node->xml()); if (_resources.effective_ram_quota().value == 0)
warning(name(), ": no valid RAM quota defined");
if (assigned_ram_quota.value >= _resources.assigned_ram_quota.value) _apply_resource_upgrade(_resources.assigned_ram_quota,
_configured_ram_quota(), _ram_limit_accessor);
if (_resources.effective_cap_quota().value == 0)
warning(name(), ": no valid capability quota defined");
_apply_resource_upgrade(_resources.assigned_cap_quota,
_configured_cap_quota(), _cap_limit_accessor);
}
template <typename QUOTA, typename CHILD_AVAIL_QUOTA_FN>
void Init::Child::_apply_resource_downgrade(QUOTA &assigned, QUOTA const configured,
QUOTA const preserved,
CHILD_AVAIL_QUOTA_FN const &child_avail_quota_fn)
{
if (configured.value >= assigned.value)
return; return;
size_t const decrease = _resources.assigned_ram_quota.value QUOTA const decrement { assigned.value - configured.value };
- assigned_ram_quota.value;
/* /*
* The child may concurrently consume quota from its RAM session, * The child may concurrently consume quota from its PD session,
* causing the 'transfer_quota' to fail. For this reason, we repeatedly * causing the 'transfer_quota' to fail. For this reason, we repeatedly
* attempt the transfer. * attempt the transfer.
*/ */
unsigned max_attempts = 4, attempts = 0; unsigned max_attempts = 4, attempts = 0;
for (; attempts < max_attempts; attempts++) { for (; attempts < max_attempts; attempts++) {
/* give up if the child's available RAM is exhausted */ /* give up if the child's available quota is exhausted */
size_t const preserved = 16*1024; size_t const avail = child_avail_quota_fn().value;
size_t const avail = _child.ram().avail_ram().value; if (avail < preserved.value)
if (avail < preserved)
break; break;
size_t const transfer = min(avail - preserved, decrease); QUOTA const transfer { min(avail - preserved.value, decrement.value) };
try { try {
_child.pd().transfer_quota(ref_pd_cap(), Ram_quota{transfer}); _child.pd().transfer_quota(ref_pd_cap(), transfer);
_resources.assigned_ram_quota = assigned.value -= transfer.value;
Ram_quota { _resources.assigned_ram_quota.value - transfer };
break; break;
} catch (...) { } } catch (...) { }
} }
if (attempts == max_attempts) if (attempts == max_attempts)
warning(name(), ": RAM downgrade failed after ", max_attempts, " attempts"); warning(name(), ": downgrade failed after ", max_attempts, " attempts");
}
void Init::Child::apply_downgrade()
{
Ram_quota const configured_ram_quota = _configured_ram_quota();
Cap_quota const configured_cap_quota = _configured_cap_quota();
_apply_resource_downgrade(_resources.assigned_ram_quota,
configured_ram_quota, Ram_quota{16*1024},
[&] () { return _child.pd().avail_ram(); });
_apply_resource_downgrade(_resources.assigned_cap_quota,
configured_cap_quota, Cap_quota{5},
[&] () { return _child.pd().avail_caps(); });
/* /*
* If designated RAM quota is lower than the child's consumed RAM, issue * If designated resource quota is lower than the child's consumed quota,
* a yield request to the child. * issue a yield request to the child.
*/ */
if (assigned_ram_quota.value < _resources.assigned_ram_quota.value) { size_t demanded_ram_quota = 0;
size_t demanded_cap_quota = 0;
size_t const demanded = _resources.assigned_ram_quota.value if (configured_ram_quota.value < _resources.assigned_ram_quota.value)
- assigned_ram_quota.value; demanded_ram_quota = _resources.assigned_ram_quota.value - configured_ram_quota.value;
Parent::Resource_args const args { "ram_quota=", Number_of_bytes(demanded) }; if (configured_cap_quota.value < _resources.assigned_cap_quota.value)
demanded_cap_quota = _resources.assigned_cap_quota.value - configured_cap_quota.value;
if (demanded_ram_quota || demanded_cap_quota) {
Parent::Resource_args const
args { "ram_quota=", Number_of_bytes(demanded_ram_quota), ", ",
"cap_quota=", demanded_cap_quota};
_child.yield(args); _child.yield(args);
} }
} }
@ -657,6 +699,7 @@ Init::Child::Child(Env &env,
Ram_quota ram_limit, Ram_quota ram_limit,
Cap_quota cap_limit, Cap_quota cap_limit,
Ram_limit_accessor &ram_limit_accessor, Ram_limit_accessor &ram_limit_accessor,
Cap_limit_accessor &cap_limit_accessor,
Prio_levels prio_levels, Prio_levels prio_levels,
Affinity::Space const &affinity_space, Affinity::Space const &affinity_space,
Registry<Parent_service> &parent_services, Registry<Parent_service> &parent_services,
@ -667,12 +710,13 @@ Init::Child::Child(Env &env,
_list_element(this), _list_element(this),
_start_node(_alloc, start_node), _start_node(_alloc, start_node),
_default_route_accessor(default_route_accessor), _default_route_accessor(default_route_accessor),
_default_caps_accessor(default_caps_accessor),
_ram_limit_accessor(ram_limit_accessor), _ram_limit_accessor(ram_limit_accessor),
_cap_limit_accessor(cap_limit_accessor),
_name_registry(name_registry), _name_registry(name_registry),
_resources(_resources_from_start_node(start_node, prio_levels, affinity_space, _resources(_resources_from_start_node(start_node, prio_levels, affinity_space,
default_caps_accessor.default_caps(), cap_limit)), default_caps_accessor.default_caps(), cap_limit)),
_resources_checked((_check_ram_constraints(ram_limit), _resources_clamped_to_limit((_clamp_resources(ram_limit, cap_limit), true)),
_check_cap_constraints(cap_limit), true)),
_parent_services(parent_services), _parent_services(parent_services),
_child_services(child_services), _child_services(child_services),
_session_requester(_env.ep().rpc_ep(), _env.ram(), _env.rm()) _session_requester(_env.ep().rpc_ep(), _env.ram(), _env.rm())

View File

@ -48,7 +48,18 @@ class Init::Child : Child_policy, Routed_service::Wakeup
struct Default_route_accessor : Interface { virtual Xml_node default_route() = 0; }; struct Default_route_accessor : Interface { virtual Xml_node default_route() = 0; };
struct Default_caps_accessor : Interface { virtual Cap_quota default_caps() = 0; }; struct Default_caps_accessor : Interface { virtual Cap_quota default_caps() = 0; };
struct Ram_limit_accessor : Interface { virtual Ram_quota ram_limit() = 0; };
template <typename QUOTA>
struct Resource_limit_accessor : Interface
{
/*
* The argument is unused. It exists solely as an overload selector.
*/
virtual QUOTA resource_limit(QUOTA const &) const = 0;
};
typedef Resource_limit_accessor<Ram_quota> Ram_limit_accessor;
typedef Resource_limit_accessor<Cap_quota> Cap_limit_accessor;
private: private:
@ -80,8 +91,9 @@ class Init::Child : Child_policy, Routed_service::Wakeup
Version _version { _start_node->xml().attribute_value("version", Version()) }; Version _version { _start_node->xml().attribute_value("version", Version()) };
Default_route_accessor &_default_route_accessor; Default_route_accessor &_default_route_accessor;
Default_caps_accessor &_default_caps_accessor;
Ram_limit_accessor &_ram_limit_accessor; Ram_limit_accessor &_ram_limit_accessor;
Cap_limit_accessor &_cap_limit_accessor;
Name_registry &_name_registry; Name_registry &_name_registry;
@ -96,7 +108,7 @@ class Init::Child : Child_policy, Routed_service::Wakeup
if (name.valid()) if (name.valid())
return name; return name;
warning("missing 'name' attribute in '<start>' entry"); warning("missing 'name' attribute in '<start>' entry");
throw Missing_name_attribute(); throw Missing_name_attribute();
} }
@ -196,41 +208,28 @@ class Init::Child : Child_policy, Routed_service::Wakeup
Resources _resources; Resources _resources;
void _check_ram_constraints(Ram_quota ram_limit) /**
* Print diagnostic information on misconfiguration
*/
void _clamp_resources(Ram_quota ram_limit, Cap_quota cap_limit)
{ {
if (_resources.effective_ram_quota().value == 0)
warning(name(), ": no valid RAM quota defined");
if (_resources.effective_cap_quota().value == 0)
warning(name(), ": no valid cap quota defined");
/*
* If the configured RAM quota exceeds our own quota, we donate
* all remaining quota to the child.
*/
if (_resources.assigned_ram_quota.value > ram_limit.value) { if (_resources.assigned_ram_quota.value > ram_limit.value) {
_resources.assigned_ram_quota.value = ram_limit.value; warning(name(), " assigned RAM (", _resources.assigned_ram_quota, ") "
"exceeds available RAM (", ram_limit, ")");
if (_verbose.enabled()) _resources.assigned_ram_quota = ram_limit;
warn_insuff_quota(ram_limit.value);
} }
}
void _check_cap_constraints(Cap_quota cap_limit)
{
if (_resources.assigned_cap_quota.value == 0)
warning(name(), ": no valid cap quota defined");
if (_resources.assigned_cap_quota.value > cap_limit.value) { if (_resources.assigned_cap_quota.value > cap_limit.value) {
warning(name(), " assigned caps (", _resources.assigned_cap_quota, ") "
warning(name(), ": assigned caps (", _resources.assigned_cap_quota.value, ") " "exceed available caps (", cap_limit, ")");
"exceed available caps (", cap_limit.value, ")"); _resources.assigned_cap_quota = cap_limit;
_resources.assigned_cap_quota.value = cap_limit.value;
} }
} }
bool const _resources_checked; Ram_quota _configured_ram_quota() const;
Cap_quota _configured_cap_quota() const;
bool const _resources_clamped_to_limit;
Registry<Parent_service> &_parent_services; Registry<Parent_service> &_parent_services;
Registry<Routed_service> &_child_services; Registry<Routed_service> &_child_services;
@ -443,6 +442,7 @@ class Init::Child : Child_policy, Routed_service::Wakeup
Ram_quota ram_limit, Ram_quota ram_limit,
Cap_quota cap_limit, Cap_quota cap_limit,
Ram_limit_accessor &ram_limit_accessor, Ram_limit_accessor &ram_limit_accessor,
Cap_limit_accessor &cap_limit_accessor,
Prio_levels prio_levels, Prio_levels prio_levels,
Affinity::Space const &affinity_space, Affinity::Space const &affinity_space,
Registry<Parent_service> &parent_services, Registry<Parent_service> &parent_services,
@ -511,8 +511,16 @@ class Init::Child : Child_policy, Routed_service::Wakeup
*/ */
Apply_config_result apply_config(Xml_node start_node); Apply_config_result apply_config(Xml_node start_node);
void apply_ram_upgrade(); /* common code for upgrading RAM and caps */
void apply_ram_downgrade(); template <typename QUOTA, typename LIMIT_ACCESSOR>
void _apply_resource_upgrade(QUOTA &, QUOTA, LIMIT_ACCESSOR const &);
template <typename QUOTA, typename CHILD_AVAIL_QUOTA_FN>
void _apply_resource_downgrade(QUOTA &, QUOTA, QUOTA,
CHILD_AVAIL_QUOTA_FN const &);
void apply_upgrade();
void apply_downgrade();
void report_state(Xml_generator &xml, Report_detail const &detail) const; void report_state(Xml_generator &xml, Report_detail const &detail) const;
@ -577,7 +585,7 @@ class Init::Child : Child_policy, Routed_service::Wakeup
void yield_response() override void yield_response() override
{ {
apply_ram_downgrade(); apply_downgrade();
_report_update_trigger.trigger_report_update(); _report_update_trigger.trigger_report_update();
} }
}; };

View File

@ -25,8 +25,9 @@
namespace Init { struct Main; } namespace Init { struct Main; }
struct Init::Main : State_reporter::Producer, Child::Default_route_accessor, struct Init::Main : State_reporter::Producer,
Child::Default_caps_accessor, Child::Ram_limit_accessor Child::Default_route_accessor, Child::Default_caps_accessor,
Child::Ram_limit_accessor, Child::Cap_limit_accessor
{ {
Env &_env; Env &_env;
@ -59,7 +60,7 @@ struct Init::Main : State_reporter::Producer, Child::Default_route_accessor,
return Ram_quota { preserve }; return Ram_quota { preserve };
} }
Ram_quota _avail_ram() Ram_quota _avail_ram() const
{ {
Ram_quota const preserved_ram = _preserved_ram_from_config(_config_xml); Ram_quota const preserved_ram = _preserved_ram_from_config(_config_xml);
@ -85,7 +86,7 @@ struct Init::Main : State_reporter::Producer, Child::Default_route_accessor,
return Cap_quota { preserve }; return Cap_quota { preserve };
} }
Cap_quota _avail_caps() Cap_quota _avail_caps() const
{ {
Cap_quota const preserved_caps = _preserved_caps_from_config(_config_xml); Cap_quota const preserved_caps = _preserved_caps_from_config(_config_xml);
@ -103,7 +104,12 @@ struct Init::Main : State_reporter::Producer, Child::Default_route_accessor,
/** /**
* Child::Ram_limit_accessor interface * Child::Ram_limit_accessor interface
*/ */
Ram_quota ram_limit() override { return _avail_ram(); } Ram_quota resource_limit(Ram_quota const &) const override { return _avail_ram(); }
/**
* Child::Cap_limit_accessor interface
*/
Cap_quota resource_limit(Cap_quota const &) const override { return _avail_caps(); }
void _handle_resource_avail() { } void _handle_resource_avail() { }
@ -374,7 +380,7 @@ void Init::Main::_handle_config()
start_node, *this, *this, _children, start_node, *this, *this, _children,
Ram_quota { avail_ram.value - used_ram.value }, Ram_quota { avail_ram.value - used_ram.value },
Cap_quota { avail_caps.value - used_caps.value }, Cap_quota { avail_caps.value - used_caps.value },
*this, prio_levels, affinity_space, *this, *this, prio_levels, affinity_space,
_parent_services, _child_services); _parent_services, _child_services);
_children.insert(&child); _children.insert(&child);
@ -430,13 +436,13 @@ void Init::Main::_handle_config()
child.initiate_env_sessions(); }); child.initiate_env_sessions(); });
/* /*
* (Re-)distribute RAM among the children, given their resource assignments * (Re-)distribute RAM and capability quota among the children, given their
* and the available slack memory. We first apply possible downgrades to * resource assignments and the available slack memory. We first apply
* free as much memory as we can. This memory is then incorporated in the * possible downgrades to free as much resources as we can. These resources
* subsequent upgrade step. * are then incorporated in the subsequent upgrade step.
*/ */
_children.for_each_child([&] (Child &child) { child.apply_ram_downgrade(); }); _children.for_each_child([&] (Child &child) { child.apply_downgrade(); });
_children.for_each_child([&] (Child &child) { child.apply_ram_upgrade(); }); _children.for_each_child([&] (Child &child) { child.apply_upgrade(); });
_server.apply_config(_config_xml); _server.apply_config(_config_xml);
} }

View File

@ -16,7 +16,7 @@
namespace Init { namespace Init {
static void warn_insuff_quota(size_t const avail) static inline void warn_insuff_quota(size_t const avail)
{ {
warning("specified quota exceeds available quota, " warning("specified quota exceeds available quota, "
"proceeding with a quota of ", avail); "proceeding with a quota of ", avail);