os: Block session server backed by VFS library

The 'vfs_block' component gives access to a VFS file through a
Block session. For more detailed information, please refer to its
README.

(On a technical note, the server currently allows only one
active session and supports only a single pending back-end
request, but it can easily be extended in the future.)

Fixes #3781.
Authored by Josef Söntgen on 2020-05-05 15:05:22 +02:00, committed by Norman Feske
parent e56dd15a4b
commit e1aab829ca
13 changed files with 952 additions and 0 deletions

@@ -0,0 +1 @@
Runtime for deploying the vfs_block component from the depot.

@@ -0,0 +1 @@
_/src/vfs_block

@@ -0,0 +1 @@
2020-06-02 b6726ce32415ae65a251df28429766210a136f67

@@ -0,0 +1,15 @@
<runtime ram="4M" caps="100" binary="vfs_block">
<provides> <block/> </provides>
<requires> <file_system/> </requires>
<config/>
<content>
<rom label="ld.lib.so"/>
<rom label="vfs.lib.so"/>
<rom label="vfs_block"/>
</content>
</runtime>

@@ -0,0 +1 @@
vfs

@@ -0,0 +1,2 @@
SRC_DIR = src/server/vfs_block
include $(GENODE_DIR)/repos/base/recipes/src/content.inc

@@ -0,0 +1 @@
2020-06-02 9a4406f2db34e93092f8e0f2b498c2d045860979

@@ -0,0 +1,5 @@
base
os
block_session
vfs
so

repos/os/run/vfs_block.run
@@ -0,0 +1,143 @@
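#
# Test scenario for the vfs_block server: a vfs server provides a
# 32 MiB zero-filled backing file, vfs_block exports it as a Block
# session, and block_tester exercises it. The script is typically
# executed from a Genode build directory via 'make run/vfs_block'.
#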
#
# Build
#
set build_components {
core init timer
server/vfs
server/vfs_block
app/block_tester
lib/vfs/import
}
source ${genode_dir}/repos/base/run/platform_drv.inc
append_platform_drv_build_components
build $build_components
create_boot_directory
#
# Generate config
#
append config {
<config verbose="no">
<parent-provides>
<service name="ROM"/>
<service name="RAM"/>
<service name="IRQ"/>
<service name="IO_MEM"/>
<service name="IO_PORT"/>
<service name="CAP"/>
<service name="PD"/>
<service name="RM"/>
<service name="CPU"/>
<service name="LOG"/>
<service name="SIGNAL"/>
</parent-provides>
<default-route>
<any-service> <parent/> <any-child/> </any-service>
</default-route>
<default caps="100"/>
<start name="timer">
<resource name="RAM" quantum="1M"/>
<provides><service name="Timer"/></provides>
</start>}
append_platform_drv_config
append config {
<start name="vfs">
<resource name="RAM" quantum="38M"/>
<provides> <service name="File_system"/> </provides>
<config>
<vfs>
<ram/>
<import>
<zero name="vfs_block.raw" size="32M"/>
</import>
</vfs>
<policy label_prefix="vfs_block" root="/" writeable="yes"/>
</config>
<route>
<any-service> <parent/> </any-service>
</route>
</start>
<start name="vfs_block">
<resource name="RAM" quantum="5M"/>
<provides> <service name="Block"/> </provides>
<config>
<vfs>
<fs buffer_size="4M" label="backend"/>
</vfs>
<policy label_prefix="block_tester"
file="/vfs_block.raw" block_size="512" writeable="yes"/>
</config>
<route>
<service name="File_system"> <child name="vfs"/> </service>
<any-service> <parent/> </any-service>
</route>
</start>
<start name="block_tester" caps="200">
<resource name="RAM" quantum="64M"/>
<config verbose="no" report="no" log="yes" stop_on_error="no">
<tests>
<sequential length="32M" size="4K" batch="128"/>
<sequential length="32M" size="8K" batch="128"/>
<random length="32M" size="512K" seed="0xc0ffee"/>
<ping_pong length="32M" size="16K"/>
<sequential length="32M" size="64K" batch="128" write="yes"/>
<replay verbose="no" batch="128">
<request type="read" lba="0" count="1"/>
<request type="read" lba="0" count="1"/>
<request type="read" lba="0" count="1"/>
<request type="read" lba="2048" count="1016"/>
<request type="read" lba="0" count="1"/>
<request type="read" lba="0" count="1"/>
<request type="read" lba="0" count="1"/>
<request type="read" lba="2048" count="1016"/>
<request type="read" lba="0" count="1"/>
<request type="read" lba="0" count="1"/>
<request type="read" lba="0" count="1"/>
<request type="read" lba="2048" count="1016"/>
<request type="read" lba="4096" count="1"/>
<request type="write" lba="0" count="1"/>
<request type="read" lba="1024" count="2048"/>
<request type="write" lba="4096" count="2048"/>
<request type="write" lba="0" count="1"/>
<request type="write" lba="2048" count="1"/>
<request type="write" lba="5696" count="1"/>
<request type="write" lba="5696" count="1"/>
<request type="sync" lba="0" count="1"/>
</replay>
</tests>
</config>
<route>
<service name="Block"><child name="vfs_block"/></service>
<any-service> <parent/> <any-child /> </any-service>
</route>
</start>
</config>}
install_config $config
#
# Boot modules
#
set boot_modules {
core init timer vfs vfs_block block_tester
ld.lib.so vfs.lib.so vfs_import.lib.so
}
append_platform_drv_boot_modules
build_boot_image $boot_modules
run_genode_until {.*child "block_tester" exited with exit value 0.*\n} 60

@@ -0,0 +1,71 @@
The 'vfs_block' component provides access to a VFS file through a Block
session. It is currently limited to serving a single file and handles only
one pending back-end request at a time.


Configuration
~~~~~~~~~~~~~

The following configuration snippet illustrates how to set up the
component:

! <start name="vfs_block">
! <resource name="RAM" quantum="3M"/>
! <provides> <service name="Block"/> </provides>
! <config>
!
! <vfs>
! <fs buffer_size="2M" label="backend"/>
! </vfs>
!
! <policy label_prefix="client"
! file="/vfs_block.img" block_size="512" writeable="yes"/>
! </config>
! <route>
! <service name="File_system" label="backend">
! <child name="fs_provider"/> </service>
! <any-service> <parent/> </any-service>
! </route>
! </start>

With this configuration, the component grants the client component 'client'
access to the file '/vfs_block.img' specified by the 'file' attribute. The
file is accessed through a file-system connection to another component,
'fs_provider'.

Block requests are translated to VFS requests that operate directly on this
file. The block size can be set via the 'block_size' attribute and defaults
to 512 bytes. The block count is determined by querying the backing file and
dividing its size by the block size. For example, a 32 MiB file served with
512-byte blocks yields a block count of 65536. Pseudo file systems that do
not return a proper size in their 'stat' implementation will therefore not
work.

The 'writeable' attribute denotes whether the Block session is allowed to
perform write requests. However, if the underlying file is read-only, such
requests will fail nonetheless. The default value is 'no'.

The component can also be configured to provide access to read-only files
such as ISO images:

! <start name="vfs_block">
! <resource name="RAM" quantum="2M"/>
! <provides> <service name="Block"/> </provides>
! <config>
!
! <vfs>
! <rom name="genode.iso"/>
! </vfs>
!
! <default-policy file="/genode.iso" block_size="2048"/>
! </config>
! <route>
! <any-service> <parent/> </any-service>
! </route>
! </start>

In this configuration, the 'genode.iso' ROM module is provided by the parent
of the 'vfs_block' component.


Example
~~~~~~~

Please refer to the 'repos/os/run/vfs_block.run' run script for an exemplary
integration.
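
For a quick impression of the client side, a client merely routes its Block
session to 'vfs_block', along the lines of the run script (component names
illustrative):

! <start name="block_tester" caps="200">
!   <resource name="RAM" quantum="64M"/>
!   <config> <!-- test configuration --> </config>
!   <route>
!     <service name="Block"> <child name="vfs_block"/> </service>
!     <any-service> <parent/> </any-service>
!   </route>
! </start>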

@@ -0,0 +1,411 @@
/*
* \brief VFS file to Block session
* \author Josef Soentgen
* \date 2020-05-05
*/
/*
* Copyright (C) 2020 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
/* Genode includes */
#include <base/attached_ram_dataspace.h>
#include <base/attached_rom_dataspace.h>
#include <base/component.h>
#include <base/heap.h>
#include <block/request_stream.h>
#include <os/session_policy.h>
#include <util/string.h>
#include <vfs/simple_env.h>
#include <vfs/file_system_factory.h>
#include <vfs/dir_file_system.h>
/* local includes */
#include "job.h"
using namespace Genode;
namespace Vfs_block {
using File_path = String<Vfs::MAX_PATH_LEN>;
struct File_info;
File_info file_info_from_policy(Session_policy const &);
class File;
} /* namespace Vfs_block */
struct Vfs_block::File_info
{
File_path const path;
bool const writeable;
size_t const block_size;
};
Vfs_block::File_info Vfs_block::file_info_from_policy(Session_policy const &policy)
{
File_path const file_path =
policy.attribute_value("file", File_path());
bool const writeable =
policy.attribute_value("writeable", false);
size_t const block_size =
policy.attribute_value("block_size", 512u);
return File_info {
.path = file_path,
.writeable = writeable,
.block_size = block_size };
}
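/*
 * Representation of the file that backs the Block session. The file is
 * opened according to the policy-derived 'File_info' and at most one
 * 'Job' is executed at a time.
 */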
class Vfs_block::File
{
private:
File(const File&) = delete;
File& operator=(const File&) = delete;
Vfs::File_system &_vfs;
Vfs::Vfs_handle *_vfs_handle;
Constructible<Vfs_block::Job> _job { };
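/*
 * The VFS signals I/O progress via this handler. The registered signal
 * context belongs to the main component and triggers another round of
 * request processing so that the pending job can make progress.
 */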
struct Io_response_handler : Vfs::Io_response_handler
{
Signal_context_capability sigh { };
void read_ready_response() override { }
void io_progress_response() override
{
if (sigh.valid()) {
Signal_transmitter(sigh).submit();
}
}
};
Io_response_handler _io_response_handler { };
Block::Session::Info _block_info { };
public:
File(Genode::Allocator &alloc,
Vfs::File_system &vfs,
Signal_context_capability sigh,
File_info const &info)
:
_vfs { vfs },
_vfs_handle { nullptr }
{
using DS = Vfs::Directory_service;
unsigned const mode =
info.writeable ? DS::OPEN_MODE_RDWR
: DS::OPEN_MODE_RDONLY;
using Open_result = DS::Open_result;
Open_result res = _vfs.open(info.path.string(), mode,
&_vfs_handle, alloc);
if (res != Open_result::OPEN_OK) {
error("Could not open '", info.path.string(), "'");
throw Genode::Exception();
}
using Stat_result = DS::Stat_result;
Vfs::Directory_service::Stat stat { };
Stat_result stat_res = _vfs.stat(info.path.string(), stat);
if (stat_res != Stat_result::STAT_OK) {
_vfs.close(_vfs_handle);
error("Could not stat '", info.path.string(), "'");
throw Genode::Exception();
}
Block::block_number_t const block_count =
stat.size / info.block_size;
_block_info = Block::Session::Info {
.block_size = info.block_size,
.block_count = block_count,
.align_log2 = log2(info.block_size),
.writeable = info.writeable,
};
_io_response_handler.sigh = sigh;
_vfs_handle->handler(&_io_response_handler);
log("Block session for file '", info.path.string(),
"' with block count: ", _block_info.block_count,
" block size: ", _block_info.block_size,
" writeable: ", _block_info.writeable);
}
~File()
{
/*
* Sync is expected to be done through the Block
* request stream, omit it here.
*/
_vfs.close(_vfs_handle);
}
Block::Session::Info block_info() const { return _block_info; }
bool execute()
{
if (!_job.constructed()) {
return false;
}
return _job->execute();
}
bool acceptable() const
{
return !_job.constructed();
}
bool valid(Block::Request const &request)
{
using Type = Block::Operation::Type;
/*
* For READ/WRITE requests we need a valid block count
* and number. Other requests might not provide such
* information because it is not needed.
*/
Block::Operation const op = request.operation;
switch (op.type) {
case Type::READ: [[fallthrough]];
case Type::WRITE:
return op.count
&& (op.block_number + op.count) <= _block_info.block_count;
case Type::TRIM: [[fallthrough]];
case Type::SYNC: return true;
default: return false;
}
}
void submit(Block::Request req, void *ptr, size_t length)
{
file_offset const base_offset =
req.operation.block_number * _block_info.block_size;
_job.construct(*_vfs_handle, req, base_offset,
reinterpret_cast<char*>(ptr), length);
}
template <typename FN>
void with_any_completed_job(FN const &fn)
{
if (!_job.constructed() || !_job->completed()) {
return;
}
Block::Request req = _job->request;
req.success = _job->succeeded();
_job.destruct();
fn(req);
}
};
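/*
 * Block session front end that forwards requests from the request
 * stream to the VFS-backed file
 */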
struct Block_session_component : Rpc_object<Block::Session>,
private Block::Request_stream
{
Entrypoint &_ep;
using Block::Request_stream::with_requests;
using Block::Request_stream::with_content;
using Block::Request_stream::try_acknowledge;
using Block::Request_stream::wakeup_client_if_needed;
Vfs_block::File &_file;
Block_session_component(Region_map &rm,
Entrypoint &ep,
Dataspace_capability ds,
Signal_context_capability sigh,
Vfs_block::File &file)
:
Request_stream { rm, ds, ep, sigh, file.block_info() },
_ep { ep },
_file { file }
{
_ep.manage(*this);
}
~Block_session_component() { _ep.dissolve(*this); }
Info info() const override { return Request_stream::info(); }
Capability<Tx> tx_cap() override { return Request_stream::tx_cap(); }
void handle_request()
{
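/*
 * Process the request stream until no more progress can be made:
 * fetch new requests (only one job is in flight at a time), drive
 * the current VFS job, and acknowledge completed requests.
 */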
for (;;) {
bool progress = false;
with_requests([&] (Block::Request request) {
using Response = Block::Request_stream::Response;
if (!_file.acceptable()) {
return Response::RETRY;
}
if (!_file.valid(request)) {
return Response::REJECTED;
}
using Op = Block::Operation;
bool const payload =
Op::has_payload(request.operation.type);
try {
if (payload) {
with_content(request,
[&] (void *ptr, size_t size) {
_file.submit(request, ptr, size);
});
} else {
_file.submit(request, nullptr, 0);
}
} catch (Vfs_block::Job::Unsupported_Operation) {
return Response::REJECTED;
}
progress |= true;
return Response::ACCEPTED;
});
progress |= _file.execute();
try_acknowledge([&] (Block::Request_stream::Ack &ack) {
auto ack_request = [&] (Block::Request request) {
ack.submit(request);
progress |= true;
};
_file.with_any_completed_job(ack_request);
});
if (!progress) {
break;
}
}
wakeup_client_if_needed();
}
};
struct Main : Rpc_object<Typed_root<Block::Session>>
{
Env &_env;
Signal_handler<Main> _request_handler {
_env.ep(), *this, &Main::_handle_requests };
Heap _heap { _env.ram(), _env.rm() };
Attached_rom_dataspace _config_rom { _env, "config" };
Vfs::Simple_env _vfs_env { _env, _heap,
_config_rom.xml().sub_node("vfs") };
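/*
 * Session-specific objects, constructed in 'session()' and destructed
 * in 'close()' (only a single Block session is supported)
 */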
Constructible<Attached_ram_dataspace> _block_ds { };
Constructible<Vfs_block::File> _block_file { };
Constructible<Block_session_component> _block_session { };
void _handle_requests()
{
if (!_block_session.constructed()) {
return;
}
_block_session->handle_request();
}
/*
* Root interface
*/
Capability<Session> session(Root::Session_args const &args,
Affinity const &) override
{
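/* the server supports only one active session at a time */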
if (_block_session.constructed()) {
throw Service_denied();
}
size_t const tx_buf_size =
Arg_string::find_arg(args.string(),
"tx_buf_size").aligned_size();
Ram_quota const ram_quota = ram_quota_from_args(args.string());
if (tx_buf_size > ram_quota.value) {
warning("communication buffer size exceeds session quota");
throw Insufficient_ram_quota();
}
/* make sure policy is up-to-date */
_config_rom.update();
Session_label const label { label_from_args(args.string()) };
Session_policy const policy { label, _config_rom.xml() };
if (!policy.has_attribute("file")) {
error("policy lacks 'file' attribute");
throw Service_denied();
}
Vfs_block::File_info const file_info =
Vfs_block::file_info_from_policy(policy);
try {
_block_ds.construct(_env.ram(), _env.rm(), tx_buf_size);
_block_file.construct(_heap, _vfs_env.root_dir(),
_request_handler, file_info);
_block_session.construct(_env.rm(), _env.ep(),
_block_ds->cap(),
_request_handler, *_block_file);
return _block_session->cap();
} catch (...) {
throw Service_denied();
}
}
void upgrade(Capability<Session>, Root::Upgrade_args const &) override { }
void close(Capability<Session> cap) override
{
if (cap == _block_session->cap()) {
_block_session.destruct();
_block_file.destruct();
_block_ds.destruct();
}
}
Main(Env &env) : _env(env)
{
_env.parent().announce(_env.ep().manage(*this));
}
};
void Component::construct(Genode::Env &env) { static Main main(env); }

@@ -0,0 +1,297 @@
/*
* \brief VFS file to Block session
* \author Josef Soentgen
* \date 2020-05-05
*/
/*
* Copyright (C) 2020 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
#ifndef _VFS_BLOCK__JOB_
#define _VFS_BLOCK__JOB_
namespace Vfs_block {
using file_size = Vfs::file_size;
using file_offset = Vfs::file_offset;
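/*
 * A 'Job' wraps one Block request and drives the corresponding VFS
 * operation on the opened file handle until completion.
 */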
struct Job
{
struct Unsupported_Operation : Genode::Exception { };
struct Invalid_state : Genode::Exception { };
enum State { PENDING, IN_PROGRESS, COMPLETE, };
static State _initial_state(Block::Operation::Type type)
{
using Type = Block::Operation::Type;
switch (type) {
case Type::READ: return State::PENDING;
case Type::WRITE: return State::PENDING;
case Type::TRIM: return State::PENDING;
case Type::SYNC: return State::PENDING;
default: throw Unsupported_Operation();
}
}
static char const *_state_to_string(State s)
{
switch (s) {
case State::PENDING: return "PENDING";
case State::IN_PROGRESS: return "IN_PROGRESS";
case State::COMPLETE: return "COMPLETE";
}
throw Invalid_state();
}
Vfs::Vfs_handle &_handle;
Block::Request const request;
char *data;
State state;
file_offset const base_offset;
file_offset current_offset;
file_size current_count;
bool success;
bool complete;
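/*
 * Each operation is driven as a small state machine
 * (PENDING -> IN_PROGRESS -> COMPLETE). For read and write,
 * the job falls back to PENDING on partial transfers and is
 * re-executed until 'current_count' reaches zero.
 */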
bool _read()
{
bool progress = false;
switch (state) {
case State::PENDING:
_handle.seek(base_offset + current_offset);
if (!_handle.fs().queue_read(&_handle, current_count)) {
return progress;
}
state = State::IN_PROGRESS;
progress = true;
[[fallthrough]];
case State::IN_PROGRESS:
{
using Result = Vfs::File_io_service::Read_result;
bool completed = false;
file_size out = 0;
Result const result =
_handle.fs().complete_read(&_handle,
data + current_offset,
current_count, out);
if ( result == Result::READ_QUEUED
|| result == Result::READ_ERR_INTERRUPT
|| result == Result::READ_ERR_AGAIN
|| result == Result::READ_ERR_WOULD_BLOCK) {
return progress;
} else
if (result == Result::READ_OK) {
current_offset += out;
current_count -= out;
success = true;
} else
if ( result == Result::READ_ERR_IO
|| result == Result::READ_ERR_INVALID) {
success = false;
completed = true;
}
if (current_count == 0 || completed) {
state = State::COMPLETE;
} else {
state = State::PENDING;
/* partial read, keep trying */
return true;
}
progress = true;
}
[[fallthrough]];
case State::COMPLETE:
complete = true;
progress = true;
default: break;
}
return progress;
}
bool _write()
{
bool progress = false;
switch (state) {
case State::PENDING:
_handle.seek(base_offset + current_offset);
state = State::IN_PROGRESS;
progress = true;
[[fallthrough]];
case State::IN_PROGRESS:
{
using Result = Vfs::File_io_service::Write_result;
bool completed = false;
file_size out = 0;
Result result = Result::WRITE_ERR_INVALID;
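/*
 * In contrast to read and sync, write is not queued. The VFS
 * may throw 'Insufficient_buffer' when it cannot take the data
 * right now; in that case the job stays in progress and is
 * retried on the next execution.
 */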
try {
result = _handle.fs().write(&_handle,
data + current_offset,
current_count, out);
} catch (Vfs::File_io_service::Insufficient_buffer) {
return progress;
}
if ( result == Result::WRITE_ERR_AGAIN
|| result == Result::WRITE_ERR_INTERRUPT
|| result == Result::WRITE_ERR_WOULD_BLOCK) {
return progress;
} else
if (result == Result::WRITE_OK) {
current_offset += out;
current_count -= out;
success = true;
} else
if ( result == Result::WRITE_ERR_IO
|| result == Result::WRITE_ERR_INVALID) {
success = false;
completed = true;
}
if (current_count == 0 || completed) {
state = State::COMPLETE;
} else {
state = State::PENDING;
/* partial write, keep trying */
return true;
}
progress = true;
}
[[fallthrough]];
case State::COMPLETE:
complete = true;
progress = true;
default: break;
}
return progress;
}
bool _sync()
{
bool progress = false;
switch (state) {
case State::PENDING:
if (!_handle.fs().queue_sync(&_handle)) {
return progress;
}
state = State::IN_PROGRESS;
progress = true;
[[fallthrough]];
case State::IN_PROGRESS:
{
using Result = Vfs::File_io_service::Sync_result;
Result const result = _handle.fs().complete_sync(&_handle);
if (result == Result::SYNC_QUEUED) {
return progress;
} else
if (result == Result::SYNC_ERR_INVALID) {
success = false;
} else
if (result == Result::SYNC_OK) {
success = true;
}
state = State::COMPLETE;
progress = true;
}
[[fallthrough]];
case State::COMPLETE:
complete = true;
progress = true;
default: break;
}
return progress;
}
bool _trim()
{
/*
* TRIM is not implemented; nonetheless, report success back to
* the client as the request is merely a hint.
*/
success = true;
complete = true;
return true;
}
Job(Vfs::Vfs_handle &handle,
Block::Request request,
file_offset base_offset,
char *data,
file_size length)
:
_handle { handle },
request { request },
data { data },
state { _initial_state(request.operation.type) },
base_offset { base_offset },
current_offset { 0 },
current_count { length },
success { false },
complete { false }
{ }
bool completed() const { return complete; }
bool succeeded() const { return success; }
void print(Genode::Output &out) const
{
Genode::print(out, "(", request.operation, ")",
" state: ", _state_to_string(state),
" base_offset: ", base_offset,
" current_offset: ", current_offset,
" current_count: ", current_count,
" success: ", success,
" complete: ", complete);
}
bool execute()
{
using Type = Block::Operation::Type;
switch (request.operation.type) {
case Type::READ: return _read();
case Type::WRITE: return _write();
case Type::SYNC: return _sync();
case Type::TRIM: return _trim();
default: return false;
}
}
};
} /* namespace Vfs_block */
#endif /* _VFS_BLOCK__JOB_ */

@@ -0,0 +1,3 @@
TARGET = vfs_block
SRC_CC = component.cc
LIBS = base vfs