diff --git a/repos/os/recipes/pkg/vfs_block/README b/repos/os/recipes/pkg/vfs_block/README
new file mode 100644
index 0000000000..8dacf73083
--- /dev/null
+++ b/repos/os/recipes/pkg/vfs_block/README
@@ -0,0 +1 @@
+Runtime for deploying the vfs_block component from the depot.
diff --git a/repos/os/recipes/pkg/vfs_block/archives b/repos/os/recipes/pkg/vfs_block/archives
new file mode 100644
index 0000000000..d15e0fda17
--- /dev/null
+++ b/repos/os/recipes/pkg/vfs_block/archives
@@ -0,0 +1 @@
+_/src/vfs_block
diff --git a/repos/os/recipes/pkg/vfs_block/hash b/repos/os/recipes/pkg/vfs_block/hash
new file mode 100644
index 0000000000..909eefd29e
--- /dev/null
+++ b/repos/os/recipes/pkg/vfs_block/hash
@@ -0,0 +1 @@
+2020-06-02 b6726ce32415ae65a251df28429766210a136f67
diff --git a/repos/os/recipes/pkg/vfs_block/runtime b/repos/os/recipes/pkg/vfs_block/runtime
new file mode 100644
index 0000000000..6da4a57dac
--- /dev/null
+++ b/repos/os/recipes/pkg/vfs_block/runtime
@@ -0,0 +1,15 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/repos/os/recipes/src/vfs_block/api b/repos/os/recipes/src/vfs_block/api
new file mode 100644
index 0000000000..0a15d9fc41
--- /dev/null
+++ b/repos/os/recipes/src/vfs_block/api
@@ -0,0 +1 @@
+vfs
diff --git a/repos/os/recipes/src/vfs_block/content.mk b/repos/os/recipes/src/vfs_block/content.mk
new file mode 100644
index 0000000000..6050acc6fc
--- /dev/null
+++ b/repos/os/recipes/src/vfs_block/content.mk
@@ -0,0 +1,2 @@
+SRC_DIR = src/server/vfs_block
+include $(GENODE_DIR)/repos/base/recipes/src/content.inc
diff --git a/repos/os/recipes/src/vfs_block/hash b/repos/os/recipes/src/vfs_block/hash
new file mode 100644
index 0000000000..c0a15ea79b
--- /dev/null
+++ b/repos/os/recipes/src/vfs_block/hash
@@ -0,0 +1 @@
+2020-06-02 9a4406f2db34e93092f8e0f2b498c2d045860979
diff --git a/repos/os/recipes/src/vfs_block/used_apis b/repos/os/recipes/src/vfs_block/used_apis
new file mode 100644
index 0000000000..f67898ee79
--- /dev/null
+++ b/repos/os/recipes/src/vfs_block/used_apis
@@ -0,0 +1,5 @@
+base
+os
+block_session
+vfs
+so
diff --git a/repos/os/run/vfs_block.run b/repos/os/run/vfs_block.run
new file mode 100644
index 0000000000..bed2d5bac7
--- /dev/null
+++ b/repos/os/run/vfs_block.run
@@ -0,0 +1,143 @@
+#
+# Build
+#
+set build_components {
+ core init timer
+ server/vfs
+ server/vfs_block
+ app/block_tester
+ lib/vfs/import
+}
+
+source ${genode_dir}/repos/base/run/platform_drv.inc
+append_platform_drv_build_components
+
+build $build_components
+
+
+create_boot_directory
+
+#
+# Generate config
+#
+append config {
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ }
+
+append_platform_drv_config
+
+append config {
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+}
+
+install_config $config
+
+#
+# Boot modules
+#
+
+set boot_modules {
+ core init timer vfs vfs_block block_tester
+ ld.lib.so vfs.lib.so vfs_import.lib.so
+}
+
+append_platform_drv_boot_modules
+
+build_boot_image $boot_modules
+
+run_genode_until {.*child "block_tester" exited with exit value 0.*\n} 60
diff --git a/repos/os/src/server/vfs_block/README b/repos/os/src/server/vfs_block/README
new file mode 100644
index 0000000000..c2e2ee3892
--- /dev/null
+++ b/repos/os/src/server/vfs_block/README
@@ -0,0 +1,71 @@
+The 'vfs_block' component provides access to a VFS file through a Block
+session. It is currently limited to serving a single file and to handling
+only one pending back-end request at a time.
+
+
+Configuration
+~~~~~~~~~~~~~
+
+The following configuration snippet illustrates how to set up the
+component:
+
+! <start name="vfs_block">
+!   <resource name="RAM" quantum="4M"/>
+!   <provides> <service name="Block"/> </provides>
+!   <config>
+!     <vfs> <fs/> </vfs>
+!     <policy label_prefix="client" file="/vfs_block.img"
+!             block_size="512" writeable="yes"/>
+!   </config>
+!   <route>
+!     <service name="File_system"> <child name="fs_provider"/> </service>
+!     <any-service> <parent/> </any-service>
+!   </route>
+! </start>
+
+With this configuration, the component 'client' is given access to the file
+'/vfs_block.img' specified by the 'file' attribute. The file is accessed via
+a file-system connection to another component, 'fs_provider'. Block requests
+are translated to VFS requests that operate directly on this file. The block
+size can be set via the 'block_size' attribute and defaults to 512 bytes. The
+block count is determined by querying the backing file and dividing its size
+by the block size. Pseudo file systems that do not return a proper size in
+their 'stat' implementation will therefore not work. The 'writeable'
+attribute denotes whether the Block session is allowed to perform write
+requests; it defaults to 'no'. If the underlying file is read-only, such
+requests will nonetheless fail.
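+For example, a backing file of 64 MiB exported with the default block size of
+512 bytes results in a block count of 131072 (64 * 1024 * 1024 / 512).
+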
+
+The component can also be configured to provide access to read-only
+files like ISO images:
+
+! <start name="vfs_block">
+!   <resource name="RAM" quantum="4M"/>
+!   <provides> <service name="Block"/> </provides>
+!   <config>
+!     <vfs> <rom name="genode.iso"/> </vfs>
+!     <policy label_prefix="client" file="/genode.iso"
+!             block_size="2048"/>
+!   </config>
+!   <route>
+!     <service name="ROM" label="genode.iso"> <parent/> </service>
+!     <any-service> <parent/> </any-service>
+!   </route>
+! </start>
+
+In this configuration the 'genode.iso' ROM module is provided by the
+parent of the 'vfs_block' component.
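+ISO 9660 images use a sector size of 2048 bytes, hence the 'block_size'
+attribute of 2048 in the snippet above.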
+
+
+Example
+~~~~~~~
+
+Please take a look at the 'repos/os/run/vfs_block.run' run script for an
+exemplary integration.
diff --git a/repos/os/src/server/vfs_block/component.cc b/repos/os/src/server/vfs_block/component.cc
new file mode 100644
index 0000000000..dd14d019dc
--- /dev/null
+++ b/repos/os/src/server/vfs_block/component.cc
@@ -0,0 +1,411 @@
+/*
+ * \brief VFS file to Block session
+ * \author Josef Soentgen
+ * \date 2020-05-05
+ */
+
+/*
+ * Copyright (C) 2020 Genode Labs GmbH
+ *
+ * This file is part of the Genode OS framework, which is distributed
+ * under the terms of the GNU Affero General Public License version 3.
+ */
+
+/* Genode includes */
+#include <base/attached_ram_dataspace.h>
+#include <base/attached_rom_dataspace.h>
+#include <base/component.h>
+#include <base/heap.h>
+#include <base/log.h>
+#include <block/request_stream.h>
+#include <os/session_policy.h>
+#include <root/root.h>
+#include <util/string.h>
+#include <vfs/simple_env.h>
+
+/* local includes */
+#include "job.h"
+
+
+using namespace Genode;
+
+
+namespace Vfs_block {
+
+	using File_path = String<256>;
+ struct File_info;
+ File_info file_info_from_policy(Session_policy const &);
+ class File;
+
+} /* namespace Vfs_block */
+
+
+struct Vfs_block::File_info
+{
+ File_path const path;
+ bool const writeable;
+ size_t const block_size;
+};
+
+
+Vfs_block::File_info Vfs_block::file_info_from_policy(Session_policy const &policy)
+{
+ File_path const file_path =
+ policy.attribute_value("file", File_path());
+
+ bool const writeable =
+ policy.attribute_value("writeable", false);
+
+ size_t const block_size =
+ policy.attribute_value("block_size", 512u);
+
+ return File_info {
+ .path = file_path,
+ .writeable = writeable,
+ .block_size = block_size };
+}
+
+
+class Vfs_block::File
+{
+ private:
+
+ File(const File&) = delete;
+ File& operator=(const File&) = delete;
+
+ Vfs::File_system &_vfs;
+ Vfs::Vfs_handle *_vfs_handle;
+
+		Constructible<Job> _job { };
+
+ struct Io_response_handler : Vfs::Io_response_handler
+ {
+ Signal_context_capability sigh { };
+
+ void read_ready_response() override { }
+
+ void io_progress_response() override
+ {
+ if (sigh.valid()) {
+ Signal_transmitter(sigh).submit();
+ }
+ }
+ };
+ Io_response_handler _io_response_handler { };
+
+ Block::Session::Info _block_info { };
+
+ public:
+
+ File(Genode::Allocator &alloc,
+ Vfs::File_system &vfs,
+ Signal_context_capability sigh,
+ File_info const &info)
+ :
+ _vfs { vfs },
+ _vfs_handle { nullptr }
+ {
+ using DS = Vfs::Directory_service;
+
+ unsigned const mode =
+ info.writeable ? DS::OPEN_MODE_RDWR
+ : DS::OPEN_MODE_RDONLY;
+
+ using Open_result = DS::Open_result;
+ Open_result res = _vfs.open(info.path.string(), mode,
+ &_vfs_handle, alloc);
+ if (res != Open_result::OPEN_OK) {
+ error("Could not open '", info.path.string(), "'");
+ throw Genode::Exception();
+ }
+
+ using Stat_result = DS::Stat_result;
+ Vfs::Directory_service::Stat stat { };
+ Stat_result stat_res = _vfs.stat(info.path.string(), stat);
+ if (stat_res != Stat_result::STAT_OK) {
+ _vfs.close(_vfs_handle);
+ error("Could not stat '", info.path.string(), "'");
+ throw Genode::Exception();
+ }
+
+ Block::block_number_t const block_count =
+ stat.size / info.block_size;
+
+ _block_info = Block::Session::Info {
+ .block_size = info.block_size,
+ .block_count = block_count,
+ .align_log2 = log2(info.block_size),
+ .writeable = info.writeable,
+ };
+
+ _io_response_handler.sigh = sigh;
+ _vfs_handle->handler(&_io_response_handler);
+
+ log("Block session for file '", info.path.string(),
+ "' with block count: ", _block_info.block_count,
+ " block size: ", _block_info.block_size,
+ " writeable: ", _block_info.writeable);
+ }
+
+ ~File()
+ {
+ /*
+ * Sync is expected to be done through the Block
+ * request stream, omit it here.
+ */
+ _vfs.close(_vfs_handle);
+ }
+
+ Block::Session::Info block_info() const { return _block_info; }
+
+ bool execute()
+ {
+ if (!_job.constructed()) {
+ return false;
+ }
+
+ return _job->execute();
+ }
+
+ bool acceptable() const
+ {
+ return !_job.constructed();
+ }
+
+ bool valid(Block::Request const &request)
+ {
+ using Type = Block::Operation::Type;
+
+ /*
+ * For READ/WRITE requests we need a valid block count
+ * and number. Other requests might not provide such
+ * information because it is not needed.
+ */
+
+ Block::Operation const op = request.operation;
+ switch (op.type) {
+ case Type::READ: [[fallthrough]];
+ case Type::WRITE:
+ return op.count
+ && (op.block_number + op.count) <= _block_info.block_count;
+
+ case Type::TRIM: [[fallthrough]];
+ case Type::SYNC: return true;
+ default: return false;
+ }
+ }
+
+ void submit(Block::Request req, void *ptr, size_t length)
+ {
+ file_offset const base_offset =
+ req.operation.block_number * _block_info.block_size;
+
+ _job.construct(*_vfs_handle, req, base_offset,
+			               reinterpret_cast<char*>(ptr), length);
+ }
+
+		template <typename FN>
+ void with_any_completed_job(FN const &fn)
+ {
+ if (!_job.constructed() || !_job->completed()) {
+ return;
+ }
+
+ Block::Request req = _job->request;
+ req.success = _job->succeeded();
+
+ _job.destruct();
+
+ fn(req);
+ }
+};
+
+
+struct Block_session_component : Rpc_object<Block::Session>,
+ private Block::Request_stream
+{
+ Entrypoint &_ep;
+
+ using Block::Request_stream::with_requests;
+ using Block::Request_stream::with_content;
+ using Block::Request_stream::try_acknowledge;
+ using Block::Request_stream::wakeup_client_if_needed;
+
+ Vfs_block::File &_file;
+
+ Block_session_component(Region_map &rm,
+ Entrypoint &ep,
+ Dataspace_capability ds,
+ Signal_context_capability sigh,
+ Vfs_block::File &file)
+ :
+ Request_stream { rm, ds, ep, sigh, file.block_info() },
+ _ep { ep },
+ _file { file }
+ {
+ _ep.manage(*this);
+ }
+
+ ~Block_session_component() { _ep.dissolve(*this); }
+
+ Info info() const override { return Request_stream::info(); }
+
+	Capability<Tx> tx_cap() override { return Request_stream::tx_cap(); }
+
+ void handle_request()
+ {
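+		/*
+		 * Loop until no more progress can be made: fetch new client
+		 * requests, drive the single in-flight VFS job, and
+		 * acknowledge completed requests.
+		 */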
+ for (;;) {
+
+ bool progress = false;
+
+ with_requests([&] (Block::Request request) {
+
+ using Response = Block::Request_stream::Response;
+
+ if (!_file.acceptable()) {
+ return Response::RETRY;
+ }
+
+ if (!_file.valid(request)) {
+ return Response::REJECTED;
+ }
+
+ using Op = Block::Operation;
+ bool const payload =
+ Op::has_payload(request.operation.type);
+
+ try {
+ if (payload) {
+ with_content(request,
+ [&] (void *ptr, size_t size) {
+ _file.submit(request, ptr, size);
+ });
+ } else {
+ _file.submit(request, nullptr, 0);
+ }
+ } catch (Vfs_block::Job::Unsupported_Operation) {
+ return Response::REJECTED;
+ }
+
+ progress |= true;
+ return Response::ACCEPTED;
+ });
+
+ progress |= _file.execute();
+
+ try_acknowledge([&] (Block::Request_stream::Ack &ack) {
+
+ auto ack_request = [&] (Block::Request request) {
+ ack.submit(request);
+ progress |= true;
+ };
+
+ _file.with_any_completed_job(ack_request);
+ });
+
+ if (!progress) {
+ break;
+ }
+ }
+
+ wakeup_client_if_needed();
+ }
+};
+
+
+struct Main : Rpc_object<Typed_root<Block::Session>>
+{
+ Env &_env;
+
+	Signal_handler<Main> _request_handler {
+ _env.ep(), *this, &Main::_handle_requests };
+
+ Heap _heap { _env.ram(), _env.rm() };
+ Attached_rom_dataspace _config_rom { _env, "config" };
+
+ Vfs::Simple_env _vfs_env { _env, _heap,
+ _config_rom.xml().sub_node("vfs") };
+
+	Constructible<Attached_ram_dataspace>  _block_ds      { };
+	Constructible<Vfs_block::File>         _block_file    { };
+	Constructible<Block_session_component> _block_session { };
+
+ void _handle_requests()
+ {
+ if (!_block_session.constructed()) {
+ return;
+ }
+
+ _block_session->handle_request();
+ }
+
+
+ /*
+ * Root interface
+ */
+
+	Capability<Session> session(Root::Session_args const &args,
+ Affinity const &) override
+ {
+ if (_block_session.constructed()) {
+ throw Service_denied();
+ }
+
+ size_t const tx_buf_size =
+ Arg_string::find_arg(args.string(),
+ "tx_buf_size").aligned_size();
+
+ Ram_quota const ram_quota = ram_quota_from_args(args.string());
+
+ if (tx_buf_size > ram_quota.value) {
+ warning("communication buffer size exceeds session quota");
+ throw Insufficient_ram_quota();
+ }
+
+ /* make sure policy is up-to-date */
+ _config_rom.update();
+
+ Session_label const label { label_from_args(args.string()) };
+ Session_policy const policy { label, _config_rom.xml() };
+
+ if (!policy.has_attribute("file")) {
+ error("policy lacks 'file' attribute");
+ throw Service_denied();
+ }
+
+ Vfs_block::File_info const file_info =
+ Vfs_block::file_info_from_policy(policy);
+
+ try {
+ _block_ds.construct(_env.ram(), _env.rm(), tx_buf_size);
+ _block_file.construct(_heap, _vfs_env.root_dir(),
+ _request_handler, file_info);
+ _block_session.construct(_env.rm(), _env.ep(),
+ _block_ds->cap(),
+ _request_handler, *_block_file);
+
+ return _block_session->cap();
+ } catch (...) {
+ throw Service_denied();
+ }
+ }
+
+	void upgrade(Capability<Session>, Root::Upgrade_args const &) override { }
+
+	void close(Capability<Session> cap) override
+ {
+ if (cap == _block_session->cap()) {
+ _block_session.destruct();
+ _block_file.destruct();
+ _block_ds.destruct();
+ }
+ }
+
+ Main(Env &env) : _env(env)
+ {
+ _env.parent().announce(_env.ep().manage(*this));
+ }
+};
+
+
+void Component::construct(Genode::Env &env) { static Main main(env); }
diff --git a/repos/os/src/server/vfs_block/job.h b/repos/os/src/server/vfs_block/job.h
new file mode 100644
index 0000000000..36fa8848cb
--- /dev/null
+++ b/repos/os/src/server/vfs_block/job.h
@@ -0,0 +1,297 @@
+/*
+ * \brief VFS file to Block session
+ * \author Josef Soentgen
+ * \date 2020-05-05
+ */
+
+/*
+ * Copyright (C) 2020 Genode Labs GmbH
+ *
+ * This file is part of the Genode OS framework, which is distributed
+ * under the terms of the GNU Affero General Public License version 3.
+ */
+
+#ifndef _VFS_BLOCK__JOB_
+#define _VFS_BLOCK__JOB_
+
+namespace Vfs_block {
+
+ using file_size = Vfs::file_size;
+ using file_offset = Vfs::file_offset;
+
+ struct Job
+ {
+ struct Unsupported_Operation : Genode::Exception { };
+ struct Invalid_state : Genode::Exception { };
+
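+		/*
+		 * A job advances from PENDING to IN_PROGRESS once the VFS
+		 * operation has been queued and to COMPLETE when it has
+		 * finished. Partial reads and writes drop back to PENDING
+		 * until the whole request has been processed.
+		 */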
+ enum State { PENDING, IN_PROGRESS, COMPLETE, };
+
+ static State _initial_state(Block::Operation::Type type)
+ {
+ using Type = Block::Operation::Type;
+
+ switch (type) {
+ case Type::READ: return State::PENDING;
+ case Type::WRITE: return State::PENDING;
+ case Type::TRIM: return State::PENDING;
+ case Type::SYNC: return State::PENDING;
+ default: throw Unsupported_Operation();
+ }
+ }
+
+ static char const *_state_to_string(State s)
+ {
+ switch (s) {
+ case State::PENDING: return "PENDING";
+ case State::IN_PROGRESS: return "IN_PROGRESS";
+ case State::COMPLETE: return "COMPLETE";
+ }
+
+ throw Invalid_state();
+ }
+
+ Vfs::Vfs_handle &_handle;
+
+ Block::Request const request;
+ char *data;
+ State state;
+ file_offset const base_offset;
+ file_offset current_offset;
+ file_size current_count;
+
+ bool success;
+ bool complete;
+
+ bool _read()
+ {
+ bool progress = false;
+
+ switch (state) {
+ case State::PENDING:
+
+ _handle.seek(base_offset + current_offset);
+ if (!_handle.fs().queue_read(&_handle, current_count)) {
+ return progress;
+ }
+
+ state = State::IN_PROGRESS;
+ progress = true;
+ [[fallthrough]];
+ case State::IN_PROGRESS:
+ {
+ using Result = Vfs::File_io_service::Read_result;
+
+ bool completed = false;
+ file_size out = 0;
+
+ Result const result =
+ _handle.fs().complete_read(&_handle,
+ data + current_offset,
+ current_count, out);
+ if ( result == Result::READ_QUEUED
+ || result == Result::READ_ERR_INTERRUPT
+ || result == Result::READ_ERR_AGAIN
+ || result == Result::READ_ERR_WOULD_BLOCK) {
+ return progress;
+ } else
+
+ if (result == Result::READ_OK) {
+ current_offset += out;
+ current_count -= out;
+ success = true;
+ } else
+
+ if ( result == Result::READ_ERR_IO
+ || result == Result::READ_ERR_INVALID) {
+ success = false;
+ completed = true;
+ }
+
+ if (current_count == 0 || completed) {
+ state = State::COMPLETE;
+ } else {
+ state = State::PENDING;
+ /* partial read, keep trying */
+ return true;
+ }
+ progress = true;
+ }
+ [[fallthrough]];
+ case State::COMPLETE:
+
+ complete = true;
+ progress = true;
+ default: break;
+ }
+
+ return progress;
+ }
+
+ bool _write()
+ {
+ bool progress = false;
+
+ switch (state) {
+ case State::PENDING:
+
+ _handle.seek(base_offset + current_offset);
+ state = State::IN_PROGRESS;
+ progress = true;
+ [[fallthrough]];
+ case State::IN_PROGRESS:
+ {
+ using Result = Vfs::File_io_service::Write_result;
+
+ bool completed = false;
+ file_size out = 0;
+
+ Result result = Result::WRITE_ERR_INVALID;
+ try {
+ result = _handle.fs().write(&_handle,
+ data + current_offset,
+ current_count, out);
+ } catch (Vfs::File_io_service::Insufficient_buffer) {
+ return progress;
+ }
+
+ if ( result == Result::WRITE_ERR_AGAIN
+ || result == Result::WRITE_ERR_INTERRUPT
+ || result == Result::WRITE_ERR_WOULD_BLOCK) {
+ return progress;
+ } else
+
+ if (result == Result::WRITE_OK) {
+ current_offset += out;
+ current_count -= out;
+ success = true;
+ } else
+
+ if ( result == Result::WRITE_ERR_IO
+ || result == Result::WRITE_ERR_INVALID) {
+ success = false;
+ completed = true;
+ }
+
+ if (current_count == 0 || completed) {
+ state = State::COMPLETE;
+ } else {
+ state = State::PENDING;
+ /* partial write, keep trying */
+ return true;
+ }
+ progress = true;
+ }
+ [[fallthrough]];
+ case State::COMPLETE:
+
+ complete = true;
+ progress = true;
+ default: break;
+ }
+
+ return progress;
+ }
+
+ bool _sync()
+ {
+ bool progress = false;
+
+ switch (state) {
+ case State::PENDING:
+
+ if (!_handle.fs().queue_sync(&_handle)) {
+ return progress;
+ }
+ state = State::IN_PROGRESS;
+ progress = true;
+ [[fallthrough]];
+ case State::IN_PROGRESS:
+ {
+ using Result = Vfs::File_io_service::Sync_result;
+ Result const result = _handle.fs().complete_sync(&_handle);
+
+ if (result == Result::SYNC_QUEUED) {
+ return progress;
+ } else
+
+ if (result == Result::SYNC_ERR_INVALID) {
+ success = false;
+ } else
+
+ if (result == Result::SYNC_OK) {
+ success = true;
+ }
+
+ state = State::COMPLETE;
+ progress = true;
+ }
+ [[fallthrough]];
+ case State::COMPLETE:
+
+ complete = true;
+ progress = true;
+ default: break;
+ }
+
+ return progress;
+ }
+
+ bool _trim()
+ {
+			/*
+			 * TRIM is not implemented. Nonetheless, report success
+			 * back to the client as the request is merely a hint.
+			 */
+ success = true;
+ complete = true;
+ return true;
+ }
+
+ Job(Vfs::Vfs_handle &handle,
+ Block::Request request,
+ file_offset base_offset,
+ char *data,
+ file_size length)
+ :
+ _handle { handle },
+ request { request },
+ data { data },
+ state { _initial_state(request.operation.type) },
+ base_offset { base_offset },
+ current_offset { 0 },
+ current_count { length },
+ success { false },
+ complete { false }
+ { }
+
+ bool completed() const { return complete; }
+ bool succeeded() const { return success; }
+
+ void print(Genode::Output &out) const
+ {
+ Genode::print(out, "(", request.operation, ")",
+ " state: ", _state_to_string(state),
+ " base_offset: ", base_offset,
+ " current_offset: ", current_offset,
+ " current_count: ", current_count,
+ " success: ", success,
+ " complete: ", complete);
+ }
+
+ bool execute()
+ {
+ using Type = Block::Operation::Type;
+
+ switch (request.operation.type) {
+ case Type::READ: return _read();
+ case Type::WRITE: return _write();
+ case Type::SYNC: return _sync();
+ case Type::TRIM: return _trim();
+ default: return false;
+ }
+ }
+ };
+
+} /* namespace Vfs_block */
+
+#endif /* _VFS_BLOCK__JOB_ */
diff --git a/repos/os/src/server/vfs_block/target.mk b/repos/os/src/server/vfs_block/target.mk
new file mode 100644
index 0000000000..9af177a9a9
--- /dev/null
+++ b/repos/os/src/server/vfs_block/target.mk
@@ -0,0 +1,3 @@
+TARGET = vfs_block
+SRC_CC = component.cc
+LIBS = base vfs