Mirror of https://github.com/genodelabs/genode.git, synced 2024-12-19 21:57:55 +00:00
tresor: improved module framework and clean-up

* Make the command pool a proper module. The command pool used to be kind of
  a module, but it was driven via custom, tresor-tester-specific code. Now it
  becomes a proper module that is driven by the module framework instead.

* Move the code for creating and handling the module-execution progress flag
  into Module_composition::execute_modules, as the function is always used
  with this code surrounding it.

* Reorganize files and remove deprecated files.

* Introduce a new class Module_channel in the module framework and let all
  channel classes inherit from it. With that class in place, the formerly
  module-specific implementations of the following methods are replaced by
  new generic implementations in the module framework:

  - ready_to_submit_request
  - submit_request
  - _peek_completed_request
  - _drop_completed_request
  - _peek_generated_request
  - _drop_generated_request
  - generated_request_complete

* Module requests are now held for the duration of their lifetime at the
  module they originate from and not, as before, at their target module. As a
  result, modules can generate new requests inline (without having to wait
  for the target module), which makes the code much simpler to read, reduces
  the amount of channel state, and allows for non-copyable request types.

* Introduce a sub-state machine for securing a superblock in the
  superblock_control module in order to reduce redundancy.

* Completely re-design some modules, like free_tree, in order to make them
  more readable.

* Replace all conditional exceptions by using the macros in
  tresor/assertion.h.

* Move methods that are used in multiple modules but were implemented
  redundantly in each of them to tresor/types.h.

* Remove the verbosity node and everything related to it from the
  tresor-tester config, as the targeted verbosity can be achieved with the
  VERBOSE_MODULE_COMMUNICATION flag in tresor/verbosity.h.

* Extract the translation of byte-granular I/O requests into tresor-block
  requests from the tresor VFS plugin and move it to a new module called
  splitter.

* Rename the files and the interface of the hashing back end so that they no
  longer reflect the hashing algorithm/configuration used, while at the same
  time making the hashing interface strict regarding the used types.

* Introduce the NONCOPYABLE macro that makes marking a class noncopyable
  short and clear (see the sketch after the commit messages below).

* Replace the former tresor/vfs_utilities.h/.cc with a new tresor/file.h that
  contains the classes Read_write_file and Write_only_file. These classes
  significantly simplify the modules crypto, block_io, and trust_anchor by
  moving the details of file access to a sub-state machine.

* Replace the former, rather trivial block-allocator module with a normal
  object of type Pba_allocator that must be provided by the client of the
  Sb_initializer (referenced in the Sb_initializer_request).

Ref #5062

tresor: read uninitialized vbas as all zeroes

Virtual block addresses (VBAs) in a Tresor container that were not yet
written by the user should always return a data block that is all zeroes.
This was the concept right from the beginning of the project. However, this
aspect somehow either never got implemented or got lost along the way.

Some context for understanding the commit: the Tresor doesn't initialize the
payload data blocks of a container when creating a new container, as this
would be rather expensive. Instead, it marks the leaf metadata nodes of the
virtual-block-device tree (those that reference the payload data blocks in
physical address space) with generation 0.

Now, this commit ensures that, whenever the virtual-block-device module reads
such a generation-0 leaf, instead of asking block_io and crypto to deliver
data from disc, it directly provides the user with 4K of zeroes (a minimal
sketch of this decision follows below).

Ref #5062
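The NONCOPYABLE convenience mentioned above boils down to deleting the copy
constructor and the copy-assignment operator, which is the hand-written pair
it replaces in the diff further below. A minimal sketch of such a macro,
assuming it is purely a textual shorthand (the actual definition in the
tresor headers may differ):

	/* sketch only - not the verbatim macro from the tresor headers */
	#define NONCOPYABLE(Type) \
		Type(Type const &) = delete; \
		Type &operator = (Type const &) = delete;

	struct Example
	{
		NONCOPYABLE(Example);  /* replaces the two explicit '= delete' members */

		Example() { }
	};

The zero-read behavior of the second commit can be pictured roughly as
follows. This is a hedged illustration, not the actual Tresor code; the names
Type_1_node, INITIAL_GENERATION, BLOCK_SIZE, and serve_unwritten_vba are
assumptions made for this sketch only.

	#include <cstring>
	#include <cstdint>

	namespace Sketch {

		enum : std::uint64_t { INITIAL_GENERATION = 0 };  /* "generation 0" */
		enum : std::size_t   { BLOCK_SIZE = 4096 };       /* 4K data blocks */

		/* assumed layout of a leaf node of the virtual-block-device tree */
		struct Type_1_node { std::uint64_t pba; std::uint64_t gen; };

		/* true if the leaf was never written and the block is served as zeroes */
		bool serve_unwritten_vba(Type_1_node const &leaf, char (&dst)[BLOCK_SIZE])
		{
			if (leaf.gen != INITIAL_GENERATION)
				return false;  /* regular path: block_io read, then crypto decrypt */

			std::memset(dst, 0, BLOCK_SIZE);  /* uninitialized VBA: 4K of zeroes */
			return true;
		}
	}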
parent 82388f4389
commit d8a71e5978
@ -2,7 +2,7 @@ TRESOR_DIR := $(REP_DIR)/src/lib/tresor
SRC_CC += crypto.cc
SRC_CC += request_pool.cc
SRC_CC += sha256_4k_hash.cc
SRC_CC += hash.cc
SRC_CC += trust_anchor.cc
SRC_CC += block_io.cc
SRC_CC += meta_tree.cc

@ -10,12 +10,9 @@ SRC_CC += virtual_block_device.cc
SRC_CC += superblock_control.cc
SRC_CC += free_tree.cc
SRC_CC += module.cc
SRC_CC += block_allocator.cc
SRC_CC += vbd_initializer.cc
SRC_CC += ft_initializer.cc
SRC_CC += sb_initializer.cc
SRC_CC += vfs_utilities.cc
SRC_CC += ft_resizing.cc
SRC_CC += sb_check.cc
SRC_CC += vbd_check.cc
SRC_CC += ft_check.cc

@ -1,6 +1,6 @@
LIB_DIR := $(REP_DIR)/src/lib/vfs/tresor

SRC_CC := vfs.cc
SRC_CC := vfs.cc splitter.cc

INC_DIR += $(LIB_DIR)

@ -1,11 +1,11 @@
SRC_CC := vfs.cc
SRC_CC += vfs.cc
SRC_CC += aes_cbc.cc

vpath % $(REP_DIR)/src/lib/vfs/tresor_crypto

INC_DIR += $(REP_DIR)/src/lib/vfs/tresor_crypto
INC_DIR += $(REP_DIR)/src/lib/tresor/include

LIBS += aes_cbc_4k

vpath vfs.cc $(REP_DIR)/src/lib/vfs/tresor_crypto/
vpath % $(REP_DIR)/src/lib/vfs/tresor_crypto/aes_cbc

SHARED_LIB = yes

@ -1,9 +1,9 @@
SRC_CC := vfs.cc
SRC_CC += vfs.cc
SRC_CC += memcopy.cc

vpath %.cc $(REP_DIR)/src/lib/vfs/tresor_crypto

INC_DIR += $(REP_DIR)/src/lib/vfs/tresor_crypto
INC_DIR += $(REP_DIR)/src/lib/tresor/include

vpath vfs.cc $(REP_DIR)/src/lib/vfs/tresor_crypto/
vpath %.cc $(REP_DIR)/src/lib/vfs/tresor_crypto/memcopy

SHARED_LIB = yes

@ -1,6 +1,5 @@
aes_cbc_4k
base
block_session
libc
openssl
os

@ -7,9 +7,7 @@ MIRROR_FROM_REP_DIR := \
lib/mk/vfs_tresor_crypto_memcopy.mk \
lib/mk/vfs_tresor_trust_anchor.mk \
src/lib/vfs/tresor \
src/lib/vfs/tresor_crypto/vfs.cc \
src/lib/vfs/tresor_crypto/aes_cbc \
src/lib/vfs/tresor_crypto/memcopy \
src/lib/vfs/tresor_crypto \
src/lib/vfs/tresor_trust_anchor \
src/app/tresor_init \
src/app/tresor_init_trust_anchor \
@ -222,17 +222,6 @@ append config {
<resource name="RAM" quantum="10M"/>
<config ld_verbose="yes">

<verbose
cmd_pool_cmd_pending="no"
cmd_pool_cmd_in_progress="no"
cmd_pool_cmd_completed="no"
blk_io_req_in_progress="no"
blk_io_req_completed="no"
ta_req_in_progress="no"
ta_req_completed="no"
client_data_mismatch="yes"
client_data_transferred="no"/>

<block-io type="vfs" path="/} [tresor_image_name] {"/>
<crypto path="/crypto"/>
<trust-anchor path="/trust_anchor"/>

@ -349,11 +338,10 @@ append config {
<request op="read" vba="22" count="3" sync="yes" salt="9612"/>
<request op="read" vba="15" count="2" sync="yes" salt="6111"/>
<request op="write" vba="11" count="6" sync="yes" salt="5436"/>
<list-snapshots/>
<check-snapshots/>
<request op="discard_snapshot" sync="yes" id="1"/>
<request op="discard_snapshot" sync="yes" id="2"/>
<list-snapshots/>
<request op="discard_snapshot" sync="yes" id="3"/>
<check-snapshots/>
<request op="sync" vba="0" count="256" sync="yes"/>
<check/>

@ -382,7 +370,6 @@ append config {
<request op="write" vba="21" count="2" sync="no" salt="8094"/>
<request op="write" vba="12" count="10" sync="no" salt="4455"/>
<request op="write" vba="26" count="4" sync="no" salt="7574"/>
<request op="discard_snapshot" sync="no" id="4"/>
<request op="write" vba="15" count="7" sync="no" salt="1931"/>
<request op="read" vba="23" count="1" sync="no" salt="7463"/>
<request op="read" vba="25" count="1" sync="no" salt="4323"/>

@ -392,7 +379,8 @@ append config {
<request op="read" vba="12" count="3" sync="no" salt="4455"/>
<request op="read" vba="19" count="3" sync="no" salt="1931"/>
<request op="read" vba="10" count="2" sync="no" salt="3758"/>
<list-snapshots/>
<check-snapshots/>
<request op="discard_snapshot" sync="no" id="4"/>
<request op="sync" vba="0" count="256" sync="yes"/>
<check/>

@ -1073,15 +1061,14 @@ if {[benchmark_blk_count] > 0} {
}
append config {

<log string="Step 18: test list-snapshots command"/>
<log string="Step 18: test check-snapshots command"/>

<request op="create_snapshot" sync="no" id="13"/>
<request op="write" vba="17737" count="70" sync="no" salt="8924"/>
<request op="create_snapshot" sync="no" id="14"/>
<request op="write" vba="00129" count="30" sync="no" salt="9471"/>
<request op="create_snapshot" sync="no" id="15"/>

<list-snapshots/>
<check-snapshots/>

</commands>
@ -52,10 +52,10 @@ struct File_vault::Ui_config
{
using Version_string = String<80>;

Version_string const version { };
Passphrase_string const passphrase { };
Number_of_bytes const client_fs_size { 0 };
Number_of_bytes const journaling_buf_size { 0 };
Version_string const version { };
Passphrase const passphrase { };
Number_of_bytes const client_fs_size { 0 };
Number_of_bytes const journaling_buf_size { 0 };

Ui_config() { }

@ -63,7 +63,7 @@ struct File_vault::Ui_config
bool verbose)
:
version { node.attribute_value("version", Version_string { }) },
passphrase { node.attribute_value("passphrase", Passphrase_string { }) },
passphrase { node.attribute_value("passphrase", Passphrase { }) },
client_fs_size { node.attribute_value("client_fs_size", Number_of_bytes { 0 }) },
journaling_buf_size { node.attribute_value("journaling_buf_size", Number_of_bytes { 0 }) }
{

@ -495,7 +495,7 @@ class File_vault::Main
throw Exception_1 { };
}

Passphrase_string _ui_setup_obtain_params_passphrase() const
Passphrase _ui_setup_obtain_params_passphrase() const
{
switch (_user_interface) {
case MENU_VIEW: return _setup_obtain_params_passphrase.plaintext().string();

@ -642,9 +642,9 @@ namespace File_vault {
});
}

void gen_tresor_init_trust_anchor_start_node(Xml_generator &xml,
Child_state const &child,
Passphrase_string const &passphrase)
void gen_tresor_init_trust_anchor_start_node(Xml_generator &xml,
Child_state const &child,
Passphrase const &passphrase)
{
child.gen_start_node(xml, [&] () {

@ -25,7 +25,6 @@ namespace File_vault {
using namespace Genode;

using Node_name = String<32>;
using Passphrase_string = String<64>;
using File_path = String<32>;

class Tree_geometry
@ -1,5 +1,5 @@
/*
* \brief Integration of the Tresor block encryption
* \brief Verify the dimensions and hashes of a tresor container
* \author Martin Stein
* \author Josef Soentgen
* \date 2020-11-10

@ -12,8 +12,8 @@
* under the terms of the GNU Affero General Public License version 3.
*/

#ifndef _TRESOR__INIT__CONFIGURATION_H_
#define _TRESOR__INIT__CONFIGURATION_H_
#ifndef _TRESOR_INIT__CONFIGURATION_H_
#define _TRESOR_INIT__CONFIGURATION_H_

/* base includes */
#include <util/xml_node.h>

@ -40,49 +40,35 @@ class Tresor_init::Configuration
uint64_t _ft_nr_of_children { 0 };
uint64_t _ft_nr_of_leafs { 0 };

static bool _is_power_of_2(uint64_t val)
{
for (; val && (val & 1) == 0; val >>= 1);
return val == 1;
}

public:

struct Invalid : Exception { };

Configuration (Xml_node const &node)
{
node.with_optional_sub_node("virtual-block-device",
[&] (Xml_node const &vbd)
node.with_optional_sub_node("virtual-block-device", [&] (Xml_node const &vbd)
{
_vbd_nr_of_lvls =
vbd.attribute_value("nr_of_levels", (uint64_t)0);
_vbd_nr_of_children =
vbd.attribute_value("nr_of_children", (uint64_t)0);
_vbd_nr_of_leafs =
vbd.attribute_value("nr_of_leafs", (uint64_t)0);
_vbd_nr_of_lvls = vbd.attribute_value("nr_of_levels", (uint64_t)0);
_vbd_nr_of_children = vbd.attribute_value("nr_of_children", (uint64_t)0);
_vbd_nr_of_leafs = vbd.attribute_value("nr_of_leafs", (uint64_t)0);
});
node.with_optional_sub_node("free-tree",
[&] (Xml_node const &ft)
node.with_optional_sub_node("free-tree", [&] (Xml_node const &ft)
{
_ft_nr_of_lvls =
ft.attribute_value("nr_of_levels", (uint64_t)0);
_ft_nr_of_children =
ft.attribute_value("nr_of_children", (uint64_t)0);
_ft_nr_of_leafs =
ft.attribute_value("nr_of_leafs", (uint64_t)0);
_ft_nr_of_lvls = ft.attribute_value("nr_of_levels", (uint64_t)0);
_ft_nr_of_children = ft.attribute_value("nr_of_children", (uint64_t)0);
_ft_nr_of_leafs = ft.attribute_value("nr_of_leafs", (uint64_t)0);
});
ASSERT(_vbd_nr_of_lvls);
ASSERT(_vbd_nr_of_lvls <= TREE_MAX_NR_OF_LEVELS);
ASSERT(_vbd_nr_of_leafs);
ASSERT(_is_power_of_2(_vbd_nr_of_children));
ASSERT(_vbd_nr_of_children <= NR_OF_T1_NODES_PER_BLK);
ASSERT(is_power_of_2(_vbd_nr_of_children));
ASSERT(_vbd_nr_of_children <= NUM_NODES_PER_BLK);
ASSERT(_ft_nr_of_lvls);
ASSERT(_ft_nr_of_lvls <= TREE_MAX_NR_OF_LEVELS);
ASSERT(_ft_nr_of_leafs);
ASSERT(_is_power_of_2(_ft_nr_of_children));
ASSERT(_ft_nr_of_children <= NR_OF_T1_NODES_PER_BLK);
ASSERT(_ft_nr_of_children <= NR_OF_T2_NODES_PER_BLK);
ASSERT(is_power_of_2(_ft_nr_of_children));
ASSERT(_ft_nr_of_children <= NUM_NODES_PER_BLK);
ASSERT(_ft_nr_of_children <= NUM_NODES_PER_BLK);
}

Configuration (Configuration const &other)

@ -101,17 +87,6 @@ class Tresor_init::Configuration
uint64_t ft_nr_of_lvls () const { return _ft_nr_of_lvls ; }
uint64_t ft_nr_of_children () const { return _ft_nr_of_children ; }
uint64_t ft_nr_of_leafs () const { return _ft_nr_of_leafs ; }

void print(Output &out) const
{
Genode::print(out,
"vbd=(lvls=", _vbd_nr_of_lvls,
" children=", _vbd_nr_of_children,
" leafs=", _vbd_nr_of_leafs, ")",
" ft=(lvls=", _ft_nr_of_lvls,
" children=", _ft_nr_of_children,
" leafs=", _ft_nr_of_leafs, ")");
}
};

#endif /* _TRESOR__INIT__CONFIGURATION_H_ */
#endif /* _TRESOR_INIT__CONFIGURATION_H_ */
@ -16,221 +16,110 @@
#include <base/attached_rom_dataspace.h>
#include <base/component.h>
#include <base/heap.h>
#include <block_session/connection.h>
#include <os/path.h>
#include <vfs/dir_file_system.h>
#include <vfs/file_system_factory.h>
#include <vfs/simple_env.h>

/* tresor includes */
#include <tresor/init/configuration.h>
#include <tresor/block_allocator.h>
#include <tresor/block_io.h>
#include <tresor/crypto.h>
#include <tresor/trust_anchor.h>
#include <tresor/ft_initializer.h>
#include <tresor/sb_initializer.h>
#include <tresor/trust_anchor.h>
#include <tresor/vbd_initializer.h>

enum { VERBOSE = 0 };
/* tresor init includes */
#include <tresor_init/configuration.h>

using namespace Genode;
using namespace Tresor;

static Block_allocator *_block_allocator_ptr;
namespace Tresor_init { class Main; }

Genode::uint64_t block_allocator_first_block()
{
if (!_block_allocator_ptr) {
struct Exception_1 { };
throw Exception_1();
}

return _block_allocator_ptr->first_block();
}

Genode::uint64_t block_allocator_nr_of_blks()
{
if (!_block_allocator_ptr) {
struct Exception_1 { };
throw Exception_1();
}

return _block_allocator_ptr->nr_of_blks();
}

class Main
:
private Vfs::Env::User,
private Tresor::Module_composition,
public Tresor::Module
class Tresor_init::Main : private Vfs::Env::User, private Tresor::Module_composition, public Tresor::Module, public Module_channel
{
private:

/*
* Noncopyable
*/
Main(Main const &) = delete;
Main &operator = (Main const &) = delete;
enum State { INIT, REQ_GENERATED, INIT_SBS_SUCCEEDED };

Env &_env;
Heap _heap { _env.ram(), _env.rm() };

Attached_rom_dataspace _config_rom { _env, "config" };

Vfs::Simple_env _vfs_env { _env, _heap, _config_rom.xml().sub_node("vfs"), *this };
Vfs::File_system &_vfs { _vfs_env.root_dir() };
Signal_handler<Main> _sigh { _env.ep(), *this, &Main::_execute };

Constructible<Tresor_init::Configuration> _cfg { };

Trust_anchor _trust_anchor { _vfs_env, _config_rom.xml().sub_node("trust-anchor") };
Crypto _crypto { _vfs_env, _config_rom.xml().sub_node("crypto") };
Block_io _block_io { _vfs_env, _config_rom.xml().sub_node("block-io") };
Block_allocator _block_allocator { NR_OF_SUPERBLOCK_SLOTS };
Vfs::Simple_env _vfs_env { _env, _heap, _config_rom.xml().sub_node("vfs"), *this };
Signal_handler<Main> _sigh { _env.ep(), *this, &Main::_handle_signal };
Constructible<Configuration> _cfg { };
Trust_anchor _trust_anchor { _vfs_env, _config_rom.xml().sub_node("trust-anchor") };
Crypto _crypto { _vfs_env, _config_rom.xml().sub_node("crypto") };
Block_io _block_io { _vfs_env, _config_rom.xml().sub_node("block-io") };
Pba_allocator _pba_alloc { NR_OF_SUPERBLOCK_SLOTS };
Vbd_initializer _vbd_initializer { };
Ft_initializer _ft_initializer { };
Sb_initializer _sb_initializer { };
Ft_initializer _ft_initializer { };
Sb_initializer _sb_initializer { };
bool _generated_req_success { };
State _state { INIT };

NONCOPYABLE(Main);

void _generated_req_completed(State_uint state_uint) override
{
if (!_generated_req_success) {
error("command pool: request failed because generated request failed)");
_env.parent().exit(-1);
return;
}
_state = (State)state_uint;
}

/**
* Vfs::Env::User interface
*/
void wakeup_vfs_user() override { _sigh.local_submit(); }

void _execute()
void _wakeup_back_end_services() { _vfs_env.io().commit(); }

void _handle_signal()
{
bool progress { true };
while (progress) {

progress = false;
execute_modules(progress);
}

_vfs_env.io().commit();

if (_state == COMPLETE)
_env.parent().exit(0);
}

/****************
** Module API **
****************/

enum State { INVALID, PENDING, IN_PROGRESS, COMPLETE };

State _state { INVALID };

bool _peek_generated_request(Genode::uint8_t *buf_ptr,
Genode::size_t buf_size) override
{
if (_state != PENDING)
return false;

Sb_initializer_request::create(
buf_ptr, buf_size, COMMAND_POOL, 0,
(unsigned long)Sb_initializer_request::INIT,
(Tree_level_index)_cfg->vbd_nr_of_lvls() - 1,
(Tree_degree)_cfg->vbd_nr_of_children(),
_cfg->vbd_nr_of_leafs(),
(Tree_level_index)_cfg->ft_nr_of_lvls() - 1,
(Tree_degree)_cfg->ft_nr_of_children(),
_cfg->ft_nr_of_leafs(),
(Tree_level_index)_cfg->ft_nr_of_lvls() - 1,
(Tree_degree)_cfg->ft_nr_of_children(),
_cfg->ft_nr_of_leafs());

return true;
}

void _drop_generated_request(Module_request &mod_req) override
{
if (_state != PENDING) {
class Exception_1 { };
throw Exception_1 { };
}

switch (mod_req.dst_module_id()) {
case SB_INITIALIZER:
_state = IN_PROGRESS;
break;
default:
class Exception_2 { };
throw Exception_2 { };
}
}

void generated_request_complete(Module_request &mod_req) override
{
if (_state != IN_PROGRESS) {
class Exception_1 { };
throw Exception_1 { };
}

switch (mod_req.dst_module_id()) {
case SB_INITIALIZER:
_state = COMPLETE;
break;
default:
class Exception_2 { };
throw Exception_2 { };
}
execute_modules();
_wakeup_back_end_services();
}

public:

Main(Env &env) : _env { env }
Main(Env &env) : Module_channel { COMMAND_POOL, 0 }, _env { env }
{
add_module(COMMAND_POOL, *this);
add_module(CRYPTO, _crypto);
add_module(TRUST_ANCHOR, _trust_anchor);
add_module(BLOCK_IO, _block_io);
add_module(BLOCK_ALLOCATOR, _block_allocator);
add_module(VBD_INITIALIZER, _vbd_initializer);
add_module(FT_INITIALIZER, _ft_initializer);
add_module(SB_INITIALIZER, _sb_initializer);
add_module(COMMAND_POOL, *this);
add_module(CRYPTO, _crypto);
add_module(TRUST_ANCHOR, _trust_anchor);
add_module(BLOCK_IO, _block_io);
add_module(VBD_INITIALIZER, _vbd_initializer);
add_module(FT_INITIALIZER, _ft_initializer);
add_module(SB_INITIALIZER, _sb_initializer);
add_channel(*this);
_cfg.construct(_config_rom.xml());
_handle_signal();
}

_block_allocator_ptr = &_block_allocator;
void execute(bool &progress) override
{
switch(_state) {
case INIT:

Xml_node const &config { _config_rom.xml() };
try {
_cfg.construct(config);
_state = PENDING;
generate_req<Sb_initializer_request>(
INIT_SBS_SUCCEEDED, progress, (Tree_level_index)_cfg->vbd_nr_of_lvls() - 1,
(Tree_degree)_cfg->vbd_nr_of_children(), _cfg->vbd_nr_of_leafs(),
(Tree_level_index)_cfg->ft_nr_of_lvls() - 1,
(Tree_degree)_cfg->ft_nr_of_children(), _cfg->ft_nr_of_leafs(),
(Tree_level_index)_cfg->ft_nr_of_lvls() - 1,
(Tree_degree)_cfg->ft_nr_of_children(), _cfg->ft_nr_of_leafs(), _pba_alloc,
_generated_req_success);
_state = REQ_GENERATED;
break;

_execute();
}
catch (Tresor_init::Configuration::Invalid) {
error("bad configuration");
_env.parent().exit(-1);
case INIT_SBS_SUCCEEDED: _env.parent().exit(0); break;
default: break;
}
}
};

void Component::construct(Genode::Env &env) { static Tresor_init::Main main { env }; }

void Component::construct(Genode::Env &env)
{
env.exec_static_constructors();

static Main main(env);
}

/*
* XXX Libc::Component::construct is needed for linking libcrypto
* because it depends on the libc but does not need to be
* executed.
*/
namespace Libc {

struct Env;

struct Component
{
void construct(Libc::Env &);
};
struct Component { void construct(Libc::Env &) { } };
}

void Libc::Component::construct(Libc::Env &) { }
@ -1,10 +1,7 @@
TARGET := tresor_init

SRC_CC += main.cc

INC_DIR += $(PRG_DIR)

LIBS += base
LIBS += tresor
INC_DIR += $(PRG_DIR)/include
LIBS += base tresor

CONFIG_XSD := config.xsd
@ -22,8 +22,11 @@
#include <vfs/file_system_factory.h>
#include <vfs/simple_env.h>

/* Tresor includes */
#include <tresor/vfs/io_job.h>
/* tresor includes */
#include <tresor/types.h>

/* vfs tresor trust anchor includes */
#include <io_job.h>

using namespace Genode;

@ -44,8 +47,6 @@ class Main : Vfs::Env::User
Vfs::File_system &_vfs { _vfs_env.root_dir() };

using Initialize_file_buf = Genode::String<32 + 1>;

using String_path = Genode::String<256>;

static String_path _config_ta_dir(Xml_node const &node)

@ -82,7 +83,7 @@ class Main : Vfs::Env::User
Genode::Constructible<Util::Io_job> _io_job { };
Util::Io_job::Buffer _io_buffer { };

Initialize_file_buf _initialize_file_buf { };
Tresor::Passphrase _initialize_file_buf { };

File(char const *base_path,
char const *name,

@ -112,7 +113,7 @@ class Main : Vfs::Env::User
_vfs.close(_vfs_handle);
}

void write_passphrase(Initialize_file_buf const &passphrase)
void write_passphrase(Tresor::Passphrase const &passphrase)
{
/* copy */
_initialize_file_buf = passphrase;

@ -212,8 +213,8 @@ class Main : Vfs::Env::User
{
Xml_node const &config { _config_rom.xml() };

Initialize_file_buf const passphrase =
config.attribute_value("passphrase", Initialize_file_buf());
Tresor::Passphrase const passphrase =
config.attribute_value("passphrase", Tresor::Passphrase());

if (!passphrase.valid()) {
error("mandatory 'passphrase' attribute missing");
@ -1,7 +1,9 @@
TARGET:= tresor_init_trust_anchor
TARGET := tresor_init_trust_anchor

SRC_CC += component.cc

SRC_CC := component.cc
INC_DIR += $(PRG_DIR)
INC_DIR += $(REP_DIR)/src/lib/vfs/tresor_trust_anchor
INC_DIR += $(REP_DIR)/src/lib/tresor/include

LIBS := base vfs
LIBS += base vfs
@ -1,339 +0,0 @@
/*
* \brief Implementation of the Crypto module API using the Crypto VFS API
* \author Martin Stein
* \date 2020-10-29
*/

/*
* Copyright (C) 2020 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/

/* local includes */
#include <crypto.h>

using namespace Genode;
using namespace Tresor;
using namespace Vfs;

Crypto::Key_directory &Crypto::_get_unused_key_dir()
{
for (Key_directory &key_dir : _key_dirs) {
if (key_dir.key_id == 0) {
return key_dir;
}
}
class Failed { };
throw Failed { };
}

Crypto::Key_directory &Crypto::_lookup_key_dir(uint32_t key_id)
{
for (Key_directory &key_dir : _key_dirs) {
if (key_dir.key_id == key_id) {
return key_dir;
}
}
class Failed { };
throw Failed { };
}

Crypto::Crypto(Vfs::Env &env, Xml_node const &crypto)
:
_env { env },
_path { crypto.attribute_value("path", String<32>()) },
_add_key_handle { vfs_open_wo(env, { _path.string(), "/add_key" }) },
_remove_key_handle { vfs_open_wo(env, { _path.string(), "/remove_key" }) }
{ }

bool Crypto::request_acceptable() const
{
return _job.op == Operation::INVALID;
}

Crypto::Result Crypto::add_key(Key const &key)
{
char buffer[sizeof (key.value) + sizeof (key.id.value)] { };
memcpy(buffer, &key.id.value, sizeof (key.id.value));
memcpy(buffer + sizeof (key.id.value),
key.value, sizeof (key.value));

_add_key_handle.seek(0);

size_t written_bytes = 0;

using Write_result = Vfs::File_io_service::Write_result;

Const_byte_range_ptr const src(buffer, sizeof(buffer));

Write_result const result =
_add_key_handle.fs().write(&_add_key_handle, src, written_bytes);

if (result == Write_result::WRITE_ERR_WOULD_BLOCK)
return Result::RETRY_LATER;

Key_directory &key_dir { _get_unused_key_dir() };

key_dir.encrypt_handle = &vfs_open_rw(
_env, { _path.string(), "/keys/", key.id.value, "/encrypt" });

key_dir.decrypt_handle = &vfs_open_rw(
_env, { _path.string(), "/keys/", key.id.value, "/decrypt" });

key_dir.key_id = key.id.value;
return Result::SUCCEEDED;
}

Crypto::Result Crypto::remove_key(Tresor::Key::Id key_id)
{
size_t written_bytes = 0;
_remove_key_handle.seek(0);

Const_byte_range_ptr const src((char *)&key_id.value, sizeof(key_id.value));

using Write_result = Vfs::File_io_service::Write_result;

Write_result const result =
_remove_key_handle.fs().write(&_remove_key_handle, src, written_bytes);

if (result == Write_result::WRITE_ERR_WOULD_BLOCK)
return Result::RETRY_LATER;

Key_directory &key_dir { _lookup_key_dir(key_id.value) };
_env.root_dir().close(key_dir.encrypt_handle);
key_dir.encrypt_handle = nullptr;
_env.root_dir().close(key_dir.decrypt_handle);
key_dir.decrypt_handle = nullptr;
key_dir.key_id = 0;
return Result::SUCCEEDED;
}

void Crypto::submit_request(Tresor::Request const &request,
Operation op,
Crypto_plain_buffer::Index plain_buf_idx,
Crypto_cipher_buffer::Index cipher_buf_idx)
{
switch (op) {
case Operation::ENCRYPT_BLOCK:

_job.request = request;
_job.state = Job_state::SUBMITTED;
_job.op = op;
_job.cipher_buf_idx = cipher_buf_idx;
_job.plain_buf_idx = plain_buf_idx;
_job.handle =
_lookup_key_dir(request.key_id()).encrypt_handle;

break;

case Operation::DECRYPT_BLOCK:

_job.request = request;
_job.state = Job_state::SUBMITTED;
_job.op = op;
_job.cipher_buf_idx = cipher_buf_idx;
_job.plain_buf_idx = plain_buf_idx;
_job.handle =
_lookup_key_dir(request.key_id()).decrypt_handle;

break;

case Operation::INVALID:

class Bad_operation { };
throw Bad_operation { };
}
}

Tresor::Request Crypto::peek_completed_encryption_request() const
{
if (_job.state != Job_state::COMPLETE ||
_job.op != Operation::ENCRYPT_BLOCK) {

return Tresor::Request { };
}
return _job.request;
}

Tresor::Request Crypto::peek_completed_decryption_request() const
{
if (_job.state != Job_state::COMPLETE ||
_job.op != Operation::DECRYPT_BLOCK) {

return Tresor::Request { };
}
return _job.request;
}

void Crypto::drop_completed_request()
{
if (_job.state != Job_state::COMPLETE) {

class Bad_state { };
throw Bad_state { };
}
_job.op = Operation::INVALID;
}

void Crypto::_execute_decrypt_block(Job &job,
Crypto_plain_buffer &plain_buf,
Crypto_cipher_buffer &cipher_buf,
bool &progress)
{
switch (job.state) {
case Job_state::SUBMITTED:
{
job.handle->seek(job.request.block_number() * Tresor::BLOCK_SIZE);

size_t written_bytes = 0;

Const_byte_range_ptr const src(
reinterpret_cast<char const*>(&cipher_buf.item(job.cipher_buf_idx)),
sizeof(Tresor::Block));

job.handle->fs().write( job.handle, src, written_bytes);

job.state = Job_state::OP_WRITTEN_TO_VFS_HANDLE;
progress = true;
return;
}
case Job_state::OP_WRITTEN_TO_VFS_HANDLE:
{
job.handle->seek(job.request.block_number() * Tresor::BLOCK_SIZE);
bool const success {
job.handle->fs().queue_read(
job.handle, sizeof (Tresor::Block)) };

if (!success) {
return;
}
job.state = Job_state::READING_VFS_HANDLE_SUCCEEDED;
progress = true;
break;
}
case Job_state::READING_VFS_HANDLE_SUCCEEDED:
{
size_t read_bytes = 0;

Byte_range_ptr const dst(
reinterpret_cast<char *>(&plain_buf.item(job.plain_buf_idx)),
sizeof(Tresor::Block));

Read_result const result =
job.handle->fs().complete_read(job.handle, dst, read_bytes);

switch (result) {
case Read_result::READ_QUEUED: return;
case Read_result::READ_ERR_WOULD_BLOCK: return;
default: break;
}
job.request.success(result == Read_result::READ_OK);
job.state = Job_state::COMPLETE;
progress = true;
return;
}
default:

return;
}
}

void Crypto::_execute_encrypt_block(Job &job,
Crypto_plain_buffer &plain_buf,
Crypto_cipher_buffer &cipher_buf,
bool &progress)
{
switch (job.state) {
case Job_state::SUBMITTED:
{
job.handle->seek(job.request.block_number() * Tresor::BLOCK_SIZE);

size_t written_bytes = 0;

Const_byte_range_ptr const src(
reinterpret_cast<char const*>(
&plain_buf.item(job.plain_buf_idx)),
sizeof(Tresor::Block));

job.handle->fs().write(job.handle, src, written_bytes);

job.state = Job_state::OP_WRITTEN_TO_VFS_HANDLE;
progress = true;
return;
}
case Job_state::OP_WRITTEN_TO_VFS_HANDLE:
{
job.handle->seek(job.request.block_number() * Tresor::BLOCK_SIZE);
bool success {
job.handle->fs().queue_read(
job.handle, sizeof (Tresor::Block)) };

if (!success) {
return;
}
job.state = Job_state::READING_VFS_HANDLE_SUCCEEDED;
progress = true;
return;
}
case Job_state::READING_VFS_HANDLE_SUCCEEDED:
{
size_t read_bytes = 0;

Byte_range_ptr const dst(
reinterpret_cast<char *>(&cipher_buf.item(job.cipher_buf_idx)),
sizeof (Tresor::Block));

Read_result const result =
job.handle->fs().complete_read(job.handle, dst, read_bytes);

switch (result) {
case Read_result::READ_QUEUED: return;
case Read_result::READ_ERR_WOULD_BLOCK: return;
default: break;
}
job.request.success(result == Read_result::READ_OK);
job.state = Job_state::COMPLETE;
progress = true;
return;
}
default:

return;
}
}

void Crypto::execute(Crypto_plain_buffer &plain_buf,
Crypto_cipher_buffer &cipher_buf,
bool &progress)
{
switch (_job.op) {
case Operation::ENCRYPT_BLOCK:

_execute_encrypt_block(_job, plain_buf, cipher_buf, progress);
break;

case Operation::DECRYPT_BLOCK:

_execute_decrypt_block(_job, plain_buf, cipher_buf, progress);
break;

case Operation::INVALID:

break;
}
}
(File diff suppressed because it is too large.)
@ -1,75 +0,0 @@
/*
* \brief Identifiers for the Tresor modules used in the Tresor tester
* \author Martin Stein
* \date 2020-08-26
*/

/*
* Copyright (C) 2020 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/

#ifndef _MODULE_TYPE_H_
#define _MODULE_TYPE_H_

/* Genode includes */
#include <base/stdint.h>

enum class Module_type : Genode::uint8_t
{
TRESOR_INIT,
TRESOR_CHECK,
TRESOR,
};

static Module_type module_type_from_uint32(Genode::uint32_t uint32)
{
class Bad_tag { };
switch (uint32) {
case 1: return Module_type::TRESOR_INIT;
case 2: return Module_type::TRESOR;
case 4: return Module_type::TRESOR_CHECK;
default: throw Bad_tag();
}
}

static Genode::uint32_t module_type_to_uint32(Module_type type)
{
class Bad_type { };
switch (type) {
case Module_type::TRESOR_INIT : return 1;
case Module_type::TRESOR : return 2;
case Module_type::TRESOR_CHECK: return 4;
}
throw Bad_type();
}

static Module_type tag_get_module_type(Genode::uint32_t tag)
{
return module_type_from_uint32((tag >> 24) & 0xff);
}

static Genode::uint32_t tag_set_module_type(Genode::uint32_t tag,
Module_type type)
{
if (tag >> 24) {

class Bad_tag { };
throw Bad_tag();
}
return tag | (module_type_to_uint32(type) << 24);
}

static Genode::uint32_t tag_unset_module_type(Genode::uint32_t tag)
{
return tag & 0xffffff;
}

#endif /* _MODULE_TYPE_H_ */
@ -3,6 +3,7 @@ TARGET := tresor_tester
SRC_CC += main.cc

INC_DIR += $(PRG_DIR)
INC_DIR += $(REP_DIR)/src/app/tresor_init/include

LIBS += base
LIBS += tresor
@ -1,63 +0,0 @@
/*
* \brief Verbosity configuration of the Tresor tester
* \author Martin Stein
* \date 2020-10-29
*/

/*
* Copyright (C) 2020 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/

#ifndef _TRESOR_TESTER__VERBOSE_NODE_H_
#define _TRESOR_TESTER__VERBOSE_NODE_H_

/* Genode includes */
#include <util/xml_node.h>

class Verbose_node
{
private:

bool _cmd_pool_cmd_pending { false };
bool _cmd_pool_cmd_in_progress { false };
bool _cmd_pool_cmd_completed { false };
bool _blk_io_req_in_progress { false };
bool _blk_io_req_completed { false };
bool _ta_req_in_progress { false };
bool _ta_req_completed { false };
bool _client_data_mismatch { false };
bool _client_data_transferred { false };

public:

Verbose_node(Genode::Xml_node const &config)
{
config.with_optional_sub_node("verbose", [&] (Genode::Xml_node const &verbose)
{
_cmd_pool_cmd_pending = verbose.attribute_value("cmd_pool_cmd_pending" , false);
_cmd_pool_cmd_in_progress = verbose.attribute_value("cmd_pool_cmd_in_progress", false);
_cmd_pool_cmd_completed = verbose.attribute_value("cmd_pool_cmd_completed" , false);
_blk_io_req_in_progress = verbose.attribute_value("blk_io_req_in_progress" , false);
_blk_io_req_completed = verbose.attribute_value("blk_io_req_completed" , false);
_ta_req_in_progress = verbose.attribute_value("ta_req_in_progress" , false);
_ta_req_completed = verbose.attribute_value("ta_req_completed" , false);
_client_data_mismatch = verbose.attribute_value("client_data_mismatch" , false);
_client_data_transferred = verbose.attribute_value("client_data_transferred" , false);
});
}

bool cmd_pool_cmd_pending () const { return _cmd_pool_cmd_pending ; }
bool cmd_pool_cmd_in_progress() const { return _cmd_pool_cmd_in_progress; }
bool cmd_pool_cmd_completed () const { return _cmd_pool_cmd_completed ; }
bool blk_io_req_in_progress () const { return _blk_io_req_in_progress ; }
bool blk_io_req_completed () const { return _blk_io_req_completed ; }
bool ta_req_in_progress () const { return _ta_req_in_progress ; }
bool ta_req_completed () const { return _ta_req_completed ; }
bool client_data_mismatch () const { return _client_data_mismatch ; }
bool client_data_transferred () const { return _client_data_transferred ; }
};

#endif /* _TRESOR_TESTER__VERBOSE_NODE_H_ */
@ -1,197 +0,0 @@
/*
* \brief Managing block allocation for the initialization of a Tresor device
* \author Josef Soentgen
* \date 2023-02-28
*/

/*
* Copyright (C) 2023 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/

/* base includes */
#include <base/log.h>

/* tresor includes */
#include <tresor/block_allocator.h>

using namespace Tresor;

Block_allocator_request::Block_allocator_request(Module_id src_module_id,
Module_request_id src_request_id)
:
Module_request { src_module_id, src_request_id, BLOCK_ALLOCATOR }
{ }

void Block_allocator_request::print(Output &out) const
{
Genode::print(out, type_to_string(_type));
}

void Block_allocator_request::create(void *buf_ptr,
size_t buf_size,
uint64_t src_module_id,
uint64_t src_request_id,
size_t req_type)
{
Block_allocator_request req { src_module_id, src_request_id };
req._type = (Type)req_type;

if (sizeof(req) > buf_size) {
class Bad_size_0 { };
throw Bad_size_0 { };
}
memcpy(buf_ptr, &req, sizeof(req));
}

char const *Block_allocator_request::type_to_string(Type type)
{
switch (type) {
case INVALID: return "invalid";
case GET: return "get";
}
return "?";
}

void Block_allocator::_execute_get(Channel &channel,
bool &progress)
{
Request &req { channel._request };
switch (channel._state) {
case Channel::PENDING:

if (_nr_of_blks <= MAX_PBA - _first_block) {

req._blk_nr = _first_block + _nr_of_blks;
++_nr_of_blks;

_mark_req_successful(channel, progress);

} else

_mark_req_failed(channel, progress, " get next block number");

return;

default:
return;
}
}

void Block_allocator::_mark_req_failed(Channel &channel,
bool &progress,
char const *str)
{
error("request failed: failed to ", str);
channel._request._success = false;
channel._state = Channel::COMPLETE;
progress = true;
}

void Block_allocator::_mark_req_successful(Channel &channel,
bool &progress)
{
channel._request._success = true;
channel._state = Channel::COMPLETE;
progress = true;
}

bool Block_allocator::_peek_completed_request(uint8_t *buf_ptr,
size_t buf_size)
{
for (Channel &channel : _channels) {
if (channel._state == Channel::COMPLETE) {
if (sizeof(channel._request) > buf_size) {
class Exception_1 { };
throw Exception_1 { };
}
memcpy(buf_ptr, &channel._request, sizeof(channel._request));
return true;
}
}
return false;
}

void Block_allocator::_drop_completed_request(Module_request &req)
{
Module_request_id id { 0 };
id = req.dst_request_id();
if (id >= NR_OF_CHANNELS) {
class Exception_1 { };
throw Exception_1 { };
}
if (_channels[id]._state != Channel::COMPLETE) {
class Exception_2 { };
throw Exception_2 { };
}
_channels[id]._state = Channel::INACTIVE;
}

Block_allocator::Block_allocator(uint64_t first_block)
:
_first_block { first_block },
_nr_of_blks { 0 }
{ }

bool Block_allocator::ready_to_submit_request()
{
for (Channel &channel : _channels) {
if (channel._state == Channel::INACTIVE)
return true;
}
return false;
}

void Block_allocator::submit_request(Module_request &req)
{
for (Module_request_id id { 0 }; id < NR_OF_CHANNELS; id++) {
if (_channels[id]._state == Channel::INACTIVE) {
req.dst_request_id(id);
_channels[id]._request = *static_cast<Request *>(&req);
_channels[id]._state = Channel::SUBMITTED;
return;
}
}
class Invalid_call { };
throw Invalid_call { };
}

void Block_allocator::execute(bool &progress)
{
for (Channel &channel : _channels) {

if (channel._state == Channel::INACTIVE)
continue;

Request &req { channel._request };
switch (req._type) {
case Request::GET:

if (channel._state == Channel::SUBMITTED)
channel._state = Channel::PENDING;

_execute_get(channel, progress);

break;
default:

class Exception_1 { };
throw Exception_1 { };
}
}
}
@ -11,679 +11,199 @@
* under the terms of the GNU Affero General Public License version 3.
*/

/* base includes */
#include <base/log.h>
#include <util/construct_at.h>

/* tresor includes */
#include <tresor/crypto.h>
#include <tresor/block_io.h>
#include <tresor/sha256_4k_hash.h>
#include <tresor/hash.h>

using namespace Tresor;

/**********************
** Block_io_request **
**********************/

Block_io_request::Block_io_request(uint64_t src_module_id,
uint64_t src_request_id,
size_t req_type,
uint64_t client_req_offset,
uint64_t client_req_tag,
uint32_t key_id,
uint64_t pba,
uint64_t vba,
uint64_t blk_count,
void *blk_ptr,
void *hash_ptr)
Block_io_request::Block_io_request(Module_id src_module_id, Module_channel_id src_chan_id, Type type,
Request_offset client_req_offset, Request_tag client_req_tag, Key_id key_id,
Physical_block_address pba, Virtual_block_address vba, Block &blk, Hash &hash,
bool &success)
:
Module_request { src_module_id, src_request_id, BLOCK_IO },
_type { (Type)req_type },
_client_req_offset { client_req_offset },
_client_req_tag { client_req_tag },
_key_id { key_id },
_pba { pba },
_vba { vba },
_blk_count { blk_count },
_blk_ptr { (addr_t)blk_ptr },
_hash_ptr { (addr_t)hash_ptr }
Module_request { src_module_id, src_chan_id, BLOCK_IO }, _type { type },
_client_req_offset { client_req_offset }, _client_req_tag { client_req_tag },
_key_id { key_id }, _pba { pba }, _vba { vba }, _blk { blk }, _hash { hash }, _success { success }
{ }

void Block_io_request::print(Output &out) const
{
if (_blk_count > 1)
Genode::print(out, type_to_string(_type), " pbas ", _pba, "..", _pba + _blk_count - 1);
else
Genode::print(out, type_to_string(_type), " pba ", _pba);
}

char const *Block_io_request::type_to_string(Type type)
{
switch (type) {
case INVALID: return "invalid";
case READ: return "read";
case WRITE: return "write";
case SYNC: return "sync";
case READ_CLIENT_DATA: return "read_client_data";
case WRITE_CLIENT_DATA: return "write_client_data";
}
return "?";
ASSERT_NEVER_REACHED;
}

/**************
** Block_io **
**************/

bool Block_io::_peek_generated_request(uint8_t *buf_ptr,
size_t buf_size)
void Block_io_channel::_generated_req_completed(State_uint state_uint)
{
for (uint32_t id { 0 }; id < NR_OF_CHANNELS; id++) {

Channel const &channel { _channels[id] };
Crypto_request::Type crypto_req_type {
channel._state == Channel::DECRYPT_CLIENT_DATA_PENDING ?
Crypto_request::DECRYPT_CLIENT_DATA :
channel._state == Channel::ENCRYPT_CLIENT_DATA_PENDING ?
Crypto_request::ENCRYPT_CLIENT_DATA :
Crypto_request::INVALID };

if (crypto_req_type != Crypto_request::INVALID) {

Request const &req { channel._request };
construct_in_buf<Crypto_request>(
buf_ptr, buf_size, BLOCK_IO, id, crypto_req_type,
req._client_req_offset, req._client_req_tag,
req._key_id, nullptr, req._pba, req._vba, nullptr,
(void *)&channel._blk_buf);

return true;
}
if (!_generated_req_success) {
error("block io: request (", *_req_ptr, ") failed because generated request failed)");
_req_ptr->_success = false;
_state = REQ_COMPLETE;
_req_ptr = nullptr;
return;
}
return false;
_state = (State)state_uint;
}

void Block_io::_drop_generated_request(Module_request &req)
{
Module_request_id const id { req.src_request_id() };
if (id >= NR_OF_CHANNELS) {
class Bad_id { };
throw Bad_id { };
}
switch (_channels[id]._state) {
case Channel::DECRYPT_CLIENT_DATA_PENDING:
_channels[id]._state = Channel::DECRYPT_CLIENT_DATA_IN_PROGRESS;
break;
case Channel::ENCRYPT_CLIENT_DATA_PENDING:
_channels[id]._state = Channel::ENCRYPT_CLIENT_DATA_IN_PROGRESS;
break;
default:
class Exception_1 { };
throw Exception_1 { };
}
}

void Block_io::generated_request_complete(Module_request &mod_req)
{
Module_request_id const id { mod_req.src_request_id() };
if (id >= NR_OF_CHANNELS) {
class Exception_1 { };
throw Exception_1 { };
}
switch (mod_req.dst_module_id()) {
case CRYPTO:
{
Crypto_request const &gen_req { *static_cast<Crypto_request *>(&mod_req) };
switch (_channels[id]._state) {
case Channel::DECRYPT_CLIENT_DATA_IN_PROGRESS:
_channels[id]._state = Channel::DECRYPT_CLIENT_DATA_COMPLETE;
_channels[id]._generated_req_success = gen_req.success();
break;
case Channel::ENCRYPT_CLIENT_DATA_IN_PROGRESS:
_channels[id]._state = Channel::ENCRYPT_CLIENT_DATA_COMPLETE;
_channels[id]._generated_req_success = gen_req.success();
break;
default:
class Exception_2 { };
throw Exception_2 { };
}
break;
}
default:
class Exception_3 { };
throw Exception_3 { };
}
}

void Block_io::_mark_req_failed(Channel &channel,
bool &progress,
char const *str)
void Block_io_channel::_mark_req_failed(bool &progress, Error_string str)
{
error("request failed: failed to ", str);
channel._request._success = false;
channel._state = Channel::COMPLETE;
_req_ptr->_success = false;
_state = REQ_COMPLETE;
_req_ptr = nullptr;
progress = true;
}

void Block_io::_mark_req_successful(Channel &channel,
bool &progress)
void Block_io_channel::_mark_req_successful(bool &progress)
{
channel._request._success = true;
channel._state = Channel::COMPLETE;
Request &req { *_req_ptr };
req._success = true;
_state = REQ_COMPLETE;
_req_ptr = nullptr;
progress = true;
}

void Block_io::_execute_read(Channel &channel,
bool &progress)
{
using Result = Vfs::File_io_service::Read_result;

Request &req { channel._request };
switch (channel._state) {
case Channel::PENDING:

enum : uint64_t { MAX_FILE_OFFSET = 0x7fffffffffffffff };
if (req._pba > (size_t)MAX_FILE_OFFSET / (size_t)BLOCK_SIZE) {

error("request failed: failed to seek file offset, pba: ", req._pba);
channel._state = Channel::COMPLETE;
req._success = false;
progress = true;
return;
if (VERBOSE_BLOCK_IO && (!VERBOSE_BLOCK_IO_PBA_FILTER || VERBOSE_BLOCK_IO_PBA == req._pba)) {
switch (req._type) {
case Request::READ:
case Request::WRITE:
log("block_io: ", req.type_to_string(req._type), " pba ", req._pba, " hash ", hash(req._blk));
break;
default: break;
}
_vfs_handle.seek(req._pba * BLOCK_SIZE +
channel._nr_of_processed_bytes);

if (!_vfs_handle.fs().queue_read(&_vfs_handle, channel._nr_of_remaining_bytes)) {
return;
}
channel._state = Channel::IN_PROGRESS;
progress = true;
return;

case Channel::IN_PROGRESS:
{
size_t nr_of_read_bytes { 0 };

Byte_range_ptr dst {
(char *)req._blk_ptr + channel._nr_of_processed_bytes,
channel._nr_of_remaining_bytes };

Result const result {
_vfs_handle.fs().complete_read(
&_vfs_handle, dst, nr_of_read_bytes) };

switch (result) {
case Result::READ_QUEUED:
case Result::READ_ERR_WOULD_BLOCK:

return;

case Result::READ_OK:

if (nr_of_read_bytes == 0) {

error("request failed: number of read bytes is 0");
channel._state = Channel::COMPLETE;
req._success = false;
progress = true;
return;
}
channel._nr_of_processed_bytes += nr_of_read_bytes;
channel._nr_of_remaining_bytes -= nr_of_read_bytes;

if (channel._nr_of_remaining_bytes == 0) {

channel._state = Channel::COMPLETE;
req._success = true;
progress = true;
return;

} else {

channel._state = Channel::PENDING;
progress = true;
return;
}

case Result::READ_ERR_IO:
case Result::READ_ERR_INVALID:

error("request failed: failed to read from file");
channel._state = Channel::COMPLETE;
req._success = false;
progress = true;
return;

default:

class Bad_complete_read_result { };
throw Bad_complete_read_result { };
}
}
default: return;
}
}

void Block_io::_execute_read_client_data(Channel &channel,
bool &progress)
void Block_io_channel::_read(bool &progress)
{
using Result = Vfs::File_io_service::Read_result;

Request &req { channel._request };
switch (channel._state) {
case Channel::PENDING:

_vfs_handle.seek(req._pba * BLOCK_SIZE +
channel._nr_of_processed_bytes);

if (!_vfs_handle.fs().queue_read(&_vfs_handle, channel._nr_of_remaining_bytes)) {
return;
}
channel._state = Channel::IN_PROGRESS;
progress = true;
return;

case Channel::IN_PROGRESS:
{
size_t nr_of_read_bytes { 0 };

Byte_range_ptr dst {
(char *)&channel._blk_buf + channel._nr_of_processed_bytes,
channel._nr_of_remaining_bytes };

Result const result {
_vfs_handle.fs().complete_read(
&_vfs_handle, dst, nr_of_read_bytes) };

switch (result) {
case Result::READ_QUEUED:
case Result::READ_ERR_WOULD_BLOCK:

return;

case Result::READ_OK:

channel._nr_of_processed_bytes += nr_of_read_bytes;
channel._nr_of_remaining_bytes -= nr_of_read_bytes;

if (channel._nr_of_remaining_bytes == 0) {

channel._state = Channel::DECRYPT_CLIENT_DATA_PENDING;
progress = true;
return;

} else {

channel._state = Channel::PENDING;
progress = true;
return;
}

case Result::READ_ERR_IO:
case Result::READ_ERR_INVALID:

channel._state = Channel::COMPLETE;
req._success = false;
progress = true;
return;

default:

class Bad_complete_read_result { };
throw Bad_complete_read_result { };
}
}
case Channel::DECRYPT_CLIENT_DATA_COMPLETE:

if (!channel._generated_req_success) {
_mark_req_failed(channel, progress, "decrypt client data");
return;
}
_mark_req_successful(channel, progress);
return;

default: return;
Request &req { *_req_ptr };
switch (_state) {
case REQ_SUBMITTED: _file.read(READ_OK, FILE_ERR, req._pba * BLOCK_SIZE, { (char *)&req._blk, BLOCK_SIZE }, progress); break;
case READ_OK: _mark_req_successful(progress); break;
case FILE_ERR: _mark_req_failed(progress, "file operation failed"); break;
default: break;
}
}

void Block_io::_execute_write_client_data(Channel &channel,
bool &progress)
void Block_io_channel::_read_client_data(bool &progress)
{
using Result = Vfs::File_io_service::Write_result;

Request &req { channel._request };
switch (channel._state) {
case Channel::PENDING:
Request &req { *_req_ptr };
switch (_state) {
case REQ_SUBMITTED: _file.read(READ_OK, FILE_ERR, req._pba * BLOCK_SIZE, { (char *)&_blk, BLOCK_SIZE }, progress); break;
case READ_OK:

channel._state = Channel::ENCRYPT_CLIENT_DATA_PENDING;
progress = true;
_generate_req<Crypto_request>(
PLAINTEXT_BLK_SUPPLIED, progress, Crypto_request::DECRYPT_CLIENT_DATA, req._client_req_offset,
req._client_req_tag, req._key_id, *(Key_value *)0, req._pba, req._vba, _blk);
return;

case Channel::ENCRYPT_CLIENT_DATA_COMPLETE:

if (!channel._generated_req_success) {
_mark_req_failed(channel, progress, "encrypt client data");
return;
}
calc_sha256_4k_hash(channel._blk_buf, *(Hash *)req._hash_ptr);
_vfs_handle.seek(req._pba * BLOCK_SIZE +
channel._nr_of_processed_bytes);

channel._state = Channel::IN_PROGRESS;
progress = true;
return;

case Channel::IN_PROGRESS:
{
size_t nr_of_written_bytes { 0 };

Const_byte_range_ptr src {
(char const *)&channel._blk_buf + channel._nr_of_processed_bytes,
channel._nr_of_remaining_bytes };

Result const result =
_vfs_handle.fs().write(
&_vfs_handle, src, nr_of_written_bytes);

switch (result) {
case Result::WRITE_ERR_WOULD_BLOCK:
return;

case Result::WRITE_OK:

channel._nr_of_processed_bytes += nr_of_written_bytes;
channel._nr_of_remaining_bytes -= nr_of_written_bytes;

if (channel._nr_of_remaining_bytes == 0) {

channel._state = Channel::COMPLETE;
req._success = true;
progress = true;
return;

} else {

channel._state = Channel::PENDING;
progress = true;
return;
}

case Result::WRITE_ERR_IO:
case Result::WRITE_ERR_INVALID:

channel._state = Channel::COMPLETE;
req._success = false;
|
||||
progress = true;
|
||||
return;
|
||||
|
||||
default:
|
||||
|
||||
class Bad_write_result { };
|
||||
throw Bad_write_result { };
|
||||
}
|
||||
|
||||
}
|
||||
default: return;
|
||||
case PLAINTEXT_BLK_SUPPLIED: _mark_req_successful(progress); break;
|
||||
case FILE_ERR: _mark_req_failed(progress, "file operation failed"); break;
|
||||
default: break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Block_io::_execute_write(Channel &channel,
|
||||
bool &progress)
|
||||
void Block_io_channel::_write_client_data(bool &progress)
|
||||
{
|
||||
using Result = Vfs::File_io_service::Write_result;
|
||||
Request &req { *_req_ptr };
|
||||
switch (_state) {
|
||||
case REQ_SUBMITTED:
|
||||
|
||||
Request &req { channel._request };
|
||||
switch (channel._state) {
|
||||
case Channel::PENDING:
|
||||
|
||||
_vfs_handle.seek(req._pba * BLOCK_SIZE +
|
||||
channel._nr_of_processed_bytes);
|
||||
|
||||
channel._state = Channel::IN_PROGRESS;
|
||||
progress = true;
|
||||
_generate_req<Crypto_request>(
|
||||
CIPHERTEXT_BLK_OBTAINED, progress, Crypto_request::ENCRYPT_CLIENT_DATA, req._client_req_offset,
|
||||
req._client_req_tag, req._key_id, *(Key_value *)0, req._pba, req._vba, _blk);
|
||||
break;
|
||||
|
||||
case Channel::IN_PROGRESS:
|
||||
{
|
||||
size_t nr_of_written_bytes { 0 };
|
||||
case CIPHERTEXT_BLK_OBTAINED:
|
||||
|
||||
Const_byte_range_ptr src {
|
||||
(char const *)req._blk_ptr + channel._nr_of_processed_bytes,
|
||||
channel._nr_of_remaining_bytes };
|
||||
calc_hash(_blk, req._hash);
|
||||
_file.write(WRITE_OK, FILE_ERR, req._pba * BLOCK_SIZE, { (char *)&_blk, BLOCK_SIZE }, progress); break;
|
||||
break;
|
||||
|
||||
Result const result =
|
||||
_vfs_handle.fs().write(
|
||||
&_vfs_handle, src, nr_of_written_bytes);
|
||||
|
||||
switch (result) {
|
||||
case Result::WRITE_ERR_WOULD_BLOCK:
|
||||
return;
|
||||
|
||||
case Result::WRITE_OK:
|
||||
|
||||
channel._nr_of_processed_bytes += nr_of_written_bytes;
|
||||
channel._nr_of_remaining_bytes -= nr_of_written_bytes;
|
||||
|
||||
if (channel._nr_of_remaining_bytes == 0) {
|
||||
|
||||
channel._state = Channel::COMPLETE;
|
||||
req._success = true;
|
||||
progress = true;
|
||||
return;
|
||||
|
||||
} else {
|
||||
|
||||
channel._state = Channel::PENDING;
|
||||
progress = true;
|
||||
return;
|
||||
}
|
||||
|
||||
case Result::WRITE_ERR_IO:
|
||||
case Result::WRITE_ERR_INVALID:
|
||||
|
||||
channel._state = Channel::COMPLETE;
|
||||
req._success = false;
|
||||
progress = true;
|
||||
return;
|
||||
|
||||
default:
|
||||
|
||||
class Bad_write_result { };
|
||||
throw Bad_write_result { };
|
||||
}
|
||||
|
||||
}
|
||||
default: return;
|
||||
case WRITE_OK: _mark_req_successful(progress); break;
|
||||
case FILE_ERR: _mark_req_failed(progress, "file operation failed"); break;
|
||||
default: break;
|
||||
}
|
||||
}
|
||||
|
||||
void Block_io::_execute_sync(Channel &channel,
|
||||
bool &progress)
|
||||
|
||||
void Block_io_channel::_write(bool &progress)
|
||||
{
|
||||
using Result = Vfs::File_io_service::Sync_result;
|
||||
|
||||
Request &req { channel._request };
|
||||
switch (channel._state) {
|
||||
case Channel::PENDING:
|
||||
|
||||
if (!_vfs_handle.fs().queue_sync(&_vfs_handle)) {
|
||||
return;
|
||||
}
|
||||
channel._state = Channel::IN_PROGRESS;
|
||||
progress = true;
|
||||
break;
|
||||
|
||||
case Channel::IN_PROGRESS:
|
||||
|
||||
switch (_vfs_handle.fs().complete_sync(&_vfs_handle)) {
|
||||
case Result::SYNC_QUEUED:
|
||||
|
||||
return;
|
||||
|
||||
case Result::SYNC_ERR_INVALID:
|
||||
|
||||
req._success = false;
|
||||
channel._state = Channel::COMPLETE;
|
||||
progress = true;
|
||||
return;
|
||||
|
||||
case Result::SYNC_OK:
|
||||
|
||||
req._success = true;
|
||||
channel._state = Channel::COMPLETE;
|
||||
progress = true;
|
||||
return;
|
||||
|
||||
default:
|
||||
|
||||
class Bad_sync_result { };
|
||||
throw Bad_sync_result { };
|
||||
}
|
||||
|
||||
default: return;
|
||||
Request &req { *_req_ptr };
|
||||
switch (_state) {
|
||||
case REQ_SUBMITTED: _file.write(WRITE_OK, FILE_ERR, req._pba * BLOCK_SIZE, { (char *)&req._blk, BLOCK_SIZE }, progress); break;
|
||||
case WRITE_OK: _mark_req_successful(progress); break;
|
||||
case FILE_ERR: _mark_req_failed(progress, "file operation failed"); break;
|
||||
default: break;
|
||||
}
|
||||
}


void Block_io_channel::_sync(bool &progress)
{
	switch (_state) {
	case REQ_SUBMITTED: _file.sync(SYNC_OK, FILE_ERR, progress); break;
	case SYNC_OK: _mark_req_successful(progress); break;
	case FILE_ERR: _mark_req_failed(progress, "file operation failed"); break;
	default: break;
	}
}
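
The _file.sync and _file.read calls above hand the operation to a file helper that later moves the channel to the state passed as first or second argument. The following is a minimal self-contained sketch of that idea; all names are hypothetical, and the file completes synchronously here, unlike the asynchronous VFS-backed helper used by the module.

#include <cstdio>
#include <cstring>
#include <cstddef>

enum State { REQ_SUBMITTED, READ_OK, FILE_ERR };

/* stand-in for the file helper: finishes immediately and moves the
 * owning channel to the state chosen by the caller */
struct Sync_file
{
	char _data[64] { "example block" };

	void read(State ok, State err, std::size_t off, char *dst, std::size_t len,
	          State &owner_state, bool &progress)
	{
		if (off + len > sizeof(_data)) {
			owner_state = err;
		} else {
			std::memcpy(dst, _data + off, len);
			owner_state = ok;
		}
		progress = true;
	}
};

int main()
{
	Sync_file file { };
	State state { REQ_SUBMITTED };
	bool progress { false };
	char buf[8] { };

	file.read(READ_OK, FILE_ERR, 0, buf, sizeof(buf) - 1, state, progress);
	std::printf("state=%d progress=%d data=%s\n", state, progress, buf);
}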


void Block_io_channel::execute(bool &progress)
{
	if (!_req_ptr)
		return;

	switch (_req_ptr->_type) {
	case Request::READ: _read(progress); break;
	case Request::WRITE: _write(progress); break;
	case Request::SYNC: _sync(progress); break;
	case Request::READ_CLIENT_DATA: _read_client_data(progress); break;
	case Request::WRITE_CLIENT_DATA: _write_client_data(progress); break;
	}
}
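
execute() only dispatches on the type of the request the channel currently points to; the request object itself stays at the module that generated it. Below is a minimal sketch of this channel-owned state-machine pattern, with hypothetical types that merely stand in for the tresor interfaces and do no real I/O.

#include <cstdio>

struct Request
{
	enum Type { READ, WRITE, SYNC };
	Type type;
	bool success { false };
};

class Channel
{
	private:

		enum State { INACTIVE, REQ_SUBMITTED, REQ_COMPLETE };

		State    _state   { INACTIVE };
		Request *_req_ptr { nullptr };

		void _complete(bool &progress, bool success)
		{
			_req_ptr->success = success;
			_req_ptr = nullptr;
			_state = REQ_COMPLETE;
			progress = true;
		}

	public:

		/* the request stays at its originator; the channel only keeps a pointer */
		void submit(Request &req) { _req_ptr = &req; _state = REQ_SUBMITTED; }

		/* one execution step; 'progress' is set whenever the state advanced */
		void execute(bool &progress)
		{
			if (!_req_ptr)
				return;

			switch (_req_ptr->type) {
			case Request::READ:
			case Request::WRITE:
			case Request::SYNC: _complete(progress, true); break;
			}
		}
};

int main()
{
	Channel chan { };
	Request req { Request::READ };
	bool progress { false };

	chan.submit(req);
	chan.execute(progress);
	std::printf("progress=%d success=%d\n", progress, req.success);
}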
|
||||
|
||||
|
||||
void Block_io::execute(bool &progress)
|
||||
{
|
||||
for (Channel &channel : _channels) {
|
||||
|
||||
if (channel._state == Channel::INACTIVE)
|
||||
continue;
|
||||
|
||||
Request &req { channel._request };
|
||||
if (channel._state == Channel::SUBMITTED) {
|
||||
|
||||
uint64_t const nr_of_remaining_bytes {
|
||||
req._blk_count * BLOCK_SIZE };
|
||||
|
||||
if (nr_of_remaining_bytes > ~(size_t)0) {
|
||||
class Exception_2 { };
|
||||
throw Exception_2 { };
|
||||
}
|
||||
channel._state = Channel::PENDING;
|
||||
channel._nr_of_processed_bytes = 0;
|
||||
channel._nr_of_remaining_bytes = (size_t)nr_of_remaining_bytes;
|
||||
}
|
||||
switch (req._type) {
|
||||
case Request::READ: _execute_read(channel, progress); break;
|
||||
case Request::WRITE: _execute_write(channel, progress); break;
|
||||
case Request::SYNC: _execute_sync(channel, progress); break;
|
||||
case Request::READ_CLIENT_DATA: _execute_read_client_data(channel, progress); break;
|
||||
case Request::WRITE_CLIENT_DATA: _execute_write_client_data(channel, progress); break;
|
||||
default:
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
}
|
||||
for_each_channel<Channel>([&] (Channel &chan) {
|
||||
chan.execute(progress); });
|
||||
}
|
||||
|
||||
|
||||
Block_io::Block_io(Vfs::Env &vfs_env,
|
||||
Xml_node const &xml_node)
|
||||
void Block_io_channel::_request_submitted(Module_request &mod_req)
|
||||
{
|
||||
_req_ptr = static_cast<Request *>(&mod_req);
|
||||
_state = REQ_SUBMITTED;
|
||||
}
|
||||
|
||||
|
||||
Block_io_channel::Block_io_channel(Module_channel_id id, Vfs::Env &vfs_env, Xml_node const &xml_node)
|
||||
:
|
||||
_path { xml_node.attribute_value("path", String<32> { "" } ) },
|
||||
_vfs_env { vfs_env }
|
||||
Module_channel { BLOCK_IO, id }, _vfs_env { vfs_env }, _path { xml_node.attribute_value("path", Tresor::Path()) }
|
||||
{ }
|
||||
|
||||
|
||||
bool Block_io::_peek_completed_request(uint8_t *buf_ptr,
|
||||
size_t buf_size)
|
||||
Block_io::Block_io(Vfs::Env &vfs_env, Xml_node const &xml_node)
|
||||
{
|
||||
for (Channel &channel : _channels) {
|
||||
if (channel._state == Channel::COMPLETE) {
|
||||
Request &req { channel._request };
|
||||
if (sizeof(req) > buf_size) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
memcpy(buf_ptr, &req, sizeof(req));
|
||||
|
||||
if (VERBOSE_BLOCK_IO &&
|
||||
(!VERBOSE_BLOCK_IO_PBA_FILTER ||
|
||||
VERBOSE_BLOCK_IO_PBA == req._pba)) {
|
||||
|
||||
switch (req._type) {
|
||||
case Request::READ:
|
||||
case Request::WRITE:
|
||||
{
|
||||
Hash hash;
|
||||
calc_sha256_4k_hash(*(Block *)req._blk_ptr, hash);
|
||||
log("block_io: ", req.type_name(), " pba ", req._pba,
|
||||
" data ", *(Block *)req._blk_ptr, " hash ", hash);
|
||||
|
||||
break;
|
||||
}
|
||||
case Request::READ_CLIENT_DATA:
|
||||
case Request::WRITE_CLIENT_DATA:
|
||||
{
|
||||
Hash hash;
|
||||
calc_sha256_4k_hash(channel._blk_buf, hash);
|
||||
log("block_io: ", req.type_name(), " pba ", req._pba,
|
||||
" data ", channel._blk_buf,
|
||||
" hash ", hash);
|
||||
|
||||
break;
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
Module_channel_id id { 0 };
|
||||
for (Constructible<Channel> &chan : _channels) {
|
||||
chan.construct(id++, vfs_env, xml_node);
|
||||
add_channel(*chan);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
void Block_io::_drop_completed_request(Module_request &req)
|
||||
{
|
||||
Module_request_id id { 0 };
|
||||
id = req.dst_request_id();
|
||||
if (id >= NR_OF_CHANNELS) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
if (_channels[id]._state != Channel::COMPLETE) {
|
||||
class Exception_2 { };
|
||||
throw Exception_2 { };
|
||||
}
|
||||
_channels[id]._state = Channel::INACTIVE;
|
||||
}
|
||||
|
||||
|
||||
bool Block_io::ready_to_submit_request()
|
||||
{
|
||||
for (Channel &channel : _channels) {
|
||||
if (channel._state == Channel::INACTIVE)
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
void Block_io::submit_request(Module_request &req)
|
||||
{
|
||||
for (Module_request_id id { 0 }; id < NR_OF_CHANNELS; id++) {
|
||||
if (_channels[id]._state == Channel::INACTIVE) {
|
||||
req.dst_request_id(id);
|
||||
_channels[id]._request = *static_cast<Request *>(&req);
|
||||
_channels[id]._state = Channel::SUBMITTED;
|
||||
return;
|
||||
}
|
||||
}
|
||||
class Invalid_call { };
|
||||
throw Invalid_call { };
|
||||
}
|
||||
|
@@ -11,51 +11,42 @@
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
/* base includes */
|
||||
#include <base/log.h>
|
||||
#include <util/construct_at.h>
|
||||
|
||||
/* tresor includes */
|
||||
#include <tresor/crypto.h>
|
||||
#include <tresor/client_data.h>
|
||||
#include <tresor/sha256_4k_hash.h>
|
||||
#include <tresor/hash.h>
|
||||
|
||||
using namespace Tresor;
|
||||
|
||||
|
||||
/********************
|
||||
** Crypto_request **
|
||||
********************/
|
||||
|
||||
Crypto_request::Crypto_request(uint64_t src_module_id,
|
||||
uint64_t src_request_id,
|
||||
size_t req_type,
|
||||
uint64_t client_req_offset,
|
||||
uint64_t client_req_tag,
|
||||
uint32_t key_id,
|
||||
void *key_plaintext_ptr,
|
||||
uint64_t pba,
|
||||
uint64_t vba,
|
||||
void *plaintext_blk_ptr,
|
||||
void *ciphertext_blk_ptr)
|
||||
Crypto_request::Crypto_request(Module_id src_module_id, Module_channel_id src_chan_id, Type type,
|
||||
Request_offset client_req_offset, Request_tag client_req_tag, Key_id key_id,
|
||||
Key_value const &key_plaintext, Physical_block_address pba, Virtual_block_address vba,
|
||||
Block &blk, bool &success)
|
||||
:
|
||||
Module_request { src_module_id, src_request_id, CRYPTO },
|
||||
_type { (Type)req_type },
|
||||
_client_req_offset { client_req_offset },
|
||||
_client_req_tag { client_req_tag },
|
||||
_pba { pba },
|
||||
_vba { vba },
|
||||
_key_id { key_id },
|
||||
_key_plaintext_ptr { (addr_t)key_plaintext_ptr },
|
||||
_plaintext_blk_ptr { (addr_t)plaintext_blk_ptr },
|
||||
_ciphertext_blk_ptr { (addr_t)ciphertext_blk_ptr }
|
||||
Module_request { src_module_id, src_chan_id, CRYPTO }, _type { type }, _client_req_offset { client_req_offset },
|
||||
_client_req_tag { client_req_tag }, _pba { pba }, _vba { vba }, _key_id { key_id }, _key_plaintext { key_plaintext },
|
||||
_blk { blk }, _success { success }
|
||||
{ }


void Crypto_request::print(Output &out) const
{
	Genode::print(out, type_to_string(_type));
	switch (_type) {
	case ADD_KEY:
	case REMOVE_KEY: Genode::print(out, " ", _key_id); break;
	case DECRYPT:
	case ENCRYPT:
	case DECRYPT_CLIENT_DATA:
	case ENCRYPT_CLIENT_DATA: Genode::print(out, " pba ", _pba); break;
	default: break;
	}
}
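
The print method above emits a type name followed by only the fields that matter for that type. A rough stand-alone equivalent using std::ostream instead of the Genode Output interface; the request type here is hypothetical and exists only for illustration.

#include <iostream>

struct Req
{
	enum Type { ADD_KEY, REMOVE_KEY, DECRYPT, ENCRYPT };
	Type type;
	unsigned key_id;
	unsigned long long pba;
};

std::ostream &operator<<(std::ostream &out, Req const &req)
{
	static char const *names[] { "add_key", "remove_key", "decrypt", "encrypt" };
	out << names[req.type];
	switch (req.type) {
	case Req::ADD_KEY:
	case Req::REMOVE_KEY: out << " " << req.key_id; break;
	case Req::DECRYPT:
	case Req::ENCRYPT:    out << " pba " << req.pba; break;
	}
	return out;
}

int main()
{
	std::cout << Req { Req::ENCRYPT, 0, 42 } << "\n";
}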
|
||||
|
||||
|
||||
char const *Crypto_request::type_to_string(Type type)
|
||||
{
|
||||
switch (type) {
|
||||
case INVALID: return "invalid";
|
||||
case ADD_KEY: return "add_key";
|
||||
case REMOVE_KEY: return "remove_key";
|
||||
case ENCRYPT_CLIENT_DATA: return "encrypt_client_data";
|
||||
@@ -63,671 +54,276 @@ char const *Crypto_request::type_to_string(Type type)
|
||||
case ENCRYPT: return "encrypt";
|
||||
case DECRYPT: return "decrypt";
|
||||
}
|
||||
return "?";
|
||||
ASSERT_NEVER_REACHED;
|
||||
}
|
||||
|
||||
|
||||
/************
|
||||
** Crypto **
|
||||
************/
|
||||
|
||||
bool Crypto::_peek_generated_request(uint8_t *buf_ptr,
|
||||
size_t buf_size)
|
||||
void Crypto_channel::_generated_req_completed(State_uint state_uint)
|
||||
{
|
||||
for (uint32_t id { 0 }; id < NR_OF_CHANNELS; id++) {
|
||||
|
||||
Channel const &chan { _channels[id] };
|
||||
Client_data_request::Type cd_req_type {
|
||||
chan._state == Channel::OBTAIN_PLAINTEXT_BLK_PENDING ?
|
||||
Client_data_request::OBTAIN_PLAINTEXT_BLK :
|
||||
chan._state == Channel::SUPPLY_PLAINTEXT_BLK_PENDING ?
|
||||
Client_data_request::SUPPLY_PLAINTEXT_BLK :
|
||||
Client_data_request::INVALID };
|
||||
|
||||
if (cd_req_type != Client_data_request::INVALID) {
|
||||
|
||||
Request const &req { chan._request };
|
||||
Client_data_request const cd_req {
|
||||
CRYPTO, id, cd_req_type, req._client_req_offset,
|
||||
req._client_req_tag, req._pba, req._vba,
|
||||
(addr_t)&chan._blk_buf };
|
||||
|
||||
if (sizeof(cd_req) > buf_size) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
memcpy(buf_ptr, &cd_req, sizeof(cd_req));;
|
||||
return true;
|
||||
}
|
||||
if (!_generated_req_success) {
|
||||
error("crypto: request (", *_req_ptr, ") failed because generated request failed)");
|
||||
_req_ptr->_success = false;
|
||||
_state = REQ_COMPLETE;
|
||||
_req_ptr = nullptr;
|
||||
return;
|
||||
}
|
||||
return false;
|
||||
_state = (State)state_uint;
|
||||
}
|
||||
|
||||
|
||||
void Crypto::_drop_generated_request(Module_request &req)
|
||||
Constructible<Crypto_channel::Key_directory> &Crypto_channel::_key_dir(Key_id key_id)
|
||||
{
|
||||
Module_request_id const id { req.src_request_id() };
|
||||
if (id >= NR_OF_CHANNELS) {
|
||||
class Bad_id { };
|
||||
throw Bad_id { };
|
||||
}
|
||||
switch (_channels[id]._state) {
|
||||
case Channel::OBTAIN_PLAINTEXT_BLK_PENDING:
|
||||
_channels[id]._state = Channel::OBTAIN_PLAINTEXT_BLK_IN_PROGRESS;
|
||||
break;
|
||||
case Channel::SUPPLY_PLAINTEXT_BLK_PENDING:
|
||||
_channels[id]._state = Channel::SUPPLY_PLAINTEXT_BLK_IN_PROGRESS;
|
||||
break;
|
||||
default:
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Crypto::Key_directory &Crypto::_lookup_key_dir(uint32_t key_id)
|
||||
{
|
||||
for (Key_directory &key_dir : _key_dirs) {
|
||||
if (key_dir.key_id == key_id) {
|
||||
for (Constructible<Key_directory> &key_dir : _key_dirs)
|
||||
if (key_dir.constructed() && key_dir->key_id == key_id)
|
||||
return key_dir;
|
||||
}
|
||||
}
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
ASSERT_NEVER_REACHED;
|
||||
}
|
||||
|
||||
|
||||
void Crypto::_mark_req_failed(Channel &channel,
|
||||
bool &progress,
|
||||
char const *str)
|
||||
void Crypto_channel::_mark_req_failed(bool &progress, char const *str)
|
||||
{
|
||||
error("crypto: request (", channel._request, ") failed at step \"", str, "\"");
|
||||
channel._request._success = false;
|
||||
channel._state = Channel::COMPLETE;
|
||||
error("crypto: request (", *_req_ptr, ") failed at step \"", str, "\"");
|
||||
_req_ptr->_success = false;
|
||||
_state = REQ_COMPLETE;
|
||||
_req_ptr = nullptr;
|
||||
progress = true;
|
||||
}
|
||||
|
||||
|
||||
void Crypto::_mark_req_successful(Channel &channel,
|
||||
bool &progress)
|
||||
void Crypto_channel::_mark_req_successful(bool &progress)
|
||||
{
|
||||
channel._request._success = true;
|
||||
channel._state = Channel::COMPLETE;
|
||||
Request &req { *_req_ptr };
|
||||
req._success = true;
|
||||
_state = REQ_COMPLETE;
|
||||
_req_ptr = nullptr;
|
||||
progress = true;
|
||||
}
|
||||
if (VERBOSE_WRITE_VBA && req._type == Request::ENCRYPT_CLIENT_DATA)
|
||||
log(" encrypt leaf data: plaintext ", _blk, " hash ", hash(_blk),
|
||||
"\n update branch:\n ", Branch_lvl_prefix("leaf data: "), req._blk);
|
||||
|
||||
if (VERBOSE_READ_VBA && req._type == Request::DECRYPT_CLIENT_DATA)
|
||||
log(" ", Branch_lvl_prefix("leaf data: "), req._blk,
|
||||
"\n decrypt leaf data: plaintext ", _blk, " hash ", hash(_blk));
|
||||
|
||||
void Crypto::_execute_add_key(Channel &channel,
|
||||
bool &progress)
|
||||
{
|
||||
Request &req { channel._request };
|
||||
switch (channel._state) {
|
||||
case Channel::SUBMITTED:
|
||||
{
|
||||
_add_key_handle.seek(0);
|
||||
|
||||
char buf[sizeof(req._key_id) + KEY_SIZE] { };
|
||||
memcpy(buf, &req._key_id, sizeof(req._key_id));
|
||||
memcpy(buf + sizeof(req._key_id), (void *)req._key_plaintext_ptr, KEY_SIZE);
|
||||
|
||||
Const_byte_range_ptr const src(buf, sizeof(buf));
|
||||
size_t nr_of_written_bytes { 0 };
|
||||
Write_result const write_result {
|
||||
_add_key_handle.fs().write(
|
||||
&_add_key_handle, src, nr_of_written_bytes) };
|
||||
|
||||
switch (write_result) {
|
||||
case Write_result::WRITE_OK:
|
||||
{
|
||||
Key_directory *key_dir_ptr { nullptr };
|
||||
for (Key_directory &key_dir : _key_dirs) {
|
||||
if (key_dir.key_id == 0)
|
||||
key_dir_ptr = &key_dir;
|
||||
}
|
||||
if (key_dir_ptr == nullptr) {
|
||||
|
||||
_mark_req_failed(channel, progress, "find unused key dir");
|
||||
return;
|
||||
}
|
||||
key_dir_ptr->key_id = req._key_id;
|
||||
key_dir_ptr->encrypt_handle =
|
||||
&vfs_open_rw(
|
||||
_vfs_env,
|
||||
{ _path.string(), "/keys/", req._key_id, "/encrypt" });
|
||||
|
||||
key_dir_ptr->decrypt_handle =
|
||||
&vfs_open_rw(
|
||||
_vfs_env,
|
||||
{ _path.string(), "/keys/", req._key_id, "/decrypt" });
|
||||
|
||||
_mark_req_successful(channel, progress);
|
||||
return;
|
||||
if (VERBOSE_CRYPTO) {
|
||||
switch (req._type) {
|
||||
case Request::DECRYPT_CLIENT_DATA:
|
||||
case Request::ENCRYPT_CLIENT_DATA:
|
||||
log("crypto: ", req.type_to_string(req._type), " pba ", req._pba, " vba ", req._vba,
|
||||
" plain ", _blk, " cipher ", req._blk);
|
||||
break;
|
||||
default: break;
|
||||
}
|
||||
case Write_result::WRITE_ERR_WOULD_BLOCK:
|
||||
case Write_result::WRITE_ERR_INVALID:
|
||||
case Write_result::WRITE_ERR_IO:
|
||||
|
||||
_mark_req_failed(channel, progress, "write command");
|
||||
return;
|
||||
}
|
||||
return;
|
||||
}
|
||||
default:
|
||||
|
||||
return;
|
||||
if (VERBOSE_BLOCK_IO && (!VERBOSE_BLOCK_IO_PBA_FILTER || VERBOSE_BLOCK_IO_PBA == req._pba)) {
|
||||
switch (req._type) {
|
||||
case Request::DECRYPT_CLIENT_DATA:
|
||||
log("block_io: read pba ", req._pba, " hash ", hash(req._blk), " (plaintext hash ", hash(_blk), ")");
|
||||
break;
|
||||
case Request::ENCRYPT_CLIENT_DATA:
|
||||
log("block_io: write pba ", req._pba, " hash ", hash(req._blk), " (plaintext hash ", hash(_blk), ")");
|
||||
break;
|
||||
default: break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Crypto::_execute_remove_key(Channel &channel,
|
||||
bool &progress)
|
||||
void Crypto_channel::_add_key(bool &progress)
|
||||
{
|
||||
Request &req { channel._request };
|
||||
switch (channel._state) {
|
||||
case Channel::SUBMITTED:
|
||||
Request &req { *_req_ptr };
|
||||
switch (_state) {
|
||||
case REQ_SUBMITTED:
|
||||
|
||||
memcpy(_add_key_buf, &req._key_id, sizeof(Key_id));
|
||||
memcpy(_add_key_buf + sizeof(Key_id), &req._key_plaintext, KEY_SIZE);
|
||||
_add_key_file.write(WRITE_OK, FILE_ERR, 0, { _add_key_buf, sizeof(_add_key_buf) }, progress);
|
||||
break;
|
||||
|
||||
case WRITE_OK:
|
||||
{
|
||||
_remove_key_handle.seek(0);
|
||||
|
||||
Const_byte_range_ptr src {
|
||||
(char const*)&req._key_id, sizeof(req._key_id) };
|
||||
|
||||
size_t nr_of_written_bytes { 0 };
|
||||
Write_result const result =
|
||||
_remove_key_handle.fs().write(
|
||||
&_remove_key_handle, src, nr_of_written_bytes);
|
||||
|
||||
switch (result) {
|
||||
case Write_result::WRITE_OK:
|
||||
{
|
||||
Key_directory &key_dir { _lookup_key_dir(req._key_id) };
|
||||
_vfs_env.root_dir().close(key_dir.encrypt_handle);
|
||||
key_dir.encrypt_handle = nullptr;
|
||||
_vfs_env.root_dir().close(key_dir.decrypt_handle);
|
||||
key_dir.decrypt_handle = nullptr;
|
||||
key_dir.key_id = 0;
|
||||
|
||||
_mark_req_successful(channel, progress);
|
||||
return;
|
||||
}
|
||||
case Write_result::WRITE_ERR_WOULD_BLOCK:
|
||||
case Write_result::WRITE_ERR_INVALID:
|
||||
case Write_result::WRITE_ERR_IO:
|
||||
|
||||
_mark_req_failed(channel, progress, "write command");
|
||||
return;
|
||||
Constructible<Key_directory> *key_dir_ptr { nullptr };
|
||||
for (Constructible<Key_directory> &key_dir : _key_dirs)
|
||||
if (!key_dir.constructed())
|
||||
key_dir_ptr = &key_dir;
|
||||
if (!key_dir_ptr) {
|
||||
_mark_req_failed(progress, "find unused key dir");
|
||||
break;
|
||||
}
|
||||
key_dir_ptr->construct(*this, req._key_id);
|
||||
_mark_req_successful(progress);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
|
||||
return;
|
||||
case FILE_ERR: _mark_req_failed(progress, "file operation failed"); break;
|
||||
default: break;
|
||||
}
|
||||
}
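
The add-key path packs the key id and the raw key material back to back into one buffer before writing it to the crypto back end. A small self-contained sketch of that buffer layout; the key length and field types are assumptions made for the example, not the tresor definitions.

#include <cstdint>
#include <cstring>
#include <cstdio>

enum { KEY_SIZE = 32 };    /* assumed key length, for illustration only */

int main()
{
	std::uint32_t const key_id { 7 };
	std::uint8_t  const key[KEY_SIZE] { 0xaa, 0xbb };

	/* layout written to the back end: [ key id | key material ] */
	char buf[sizeof(key_id) + KEY_SIZE] { };
	std::memcpy(buf, &key_id, sizeof(key_id));
	std::memcpy(buf + sizeof(key_id), key, KEY_SIZE);

	std::printf("prepared %zu-byte add-key buffer for key id %u\n",
	            sizeof(buf), (unsigned)key_id);
}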
|
||||
|
||||
|
||||
void Crypto::_execute_encrypt_client_data(Channel &channel,
|
||||
bool &progress)
|
||||
void Crypto_channel::_remove_key(bool &progress)
|
||||
{
|
||||
Request &req { channel._request };
|
||||
switch (channel._state) {
|
||||
case Channel::SUBMITTED:
|
||||
Request &req { *_req_ptr };
|
||||
switch (_state) {
|
||||
case REQ_SUBMITTED: _remove_key_file.write(WRITE_OK, FILE_ERR, 0, { (char *)&req._key_id, sizeof(Key_id) }, progress); break;
|
||||
case WRITE_OK:
|
||||
|
||||
channel._state = Channel::OBTAIN_PLAINTEXT_BLK_PENDING;
|
||||
progress = true;
|
||||
return;
|
||||
_key_dir(req._key_id).destruct();
|
||||
_mark_req_successful(progress);
|
||||
break;
|
||||
|
||||
case Channel::OBTAIN_PLAINTEXT_BLK_COMPLETE:
|
||||
{
|
||||
if (!channel._generated_req_success) {
|
||||
|
||||
_mark_req_failed(channel, progress, "obtain plaintext block");
|
||||
return;
|
||||
}
|
||||
channel._vfs_handle = _lookup_key_dir(req._key_id).encrypt_handle;
|
||||
channel._vfs_handle->seek(req._pba * BLOCK_SIZE);
|
||||
size_t nr_of_written_bytes { 0 };
|
||||
|
||||
Const_byte_range_ptr dst {
|
||||
(char *)&channel._blk_buf, BLOCK_SIZE };
|
||||
|
||||
channel._vfs_handle->fs().write(
|
||||
channel._vfs_handle, dst, nr_of_written_bytes);
|
||||
|
||||
channel._state = Channel::OP_WRITTEN_TO_VFS_HANDLE;
|
||||
progress = true;
|
||||
return;
|
||||
}
|
||||
case Channel::OP_WRITTEN_TO_VFS_HANDLE:
|
||||
{
|
||||
channel._vfs_handle->seek(req._pba * BLOCK_SIZE);
|
||||
bool success {
|
||||
channel._vfs_handle->fs().queue_read(
|
||||
channel._vfs_handle, BLOCK_SIZE) };
|
||||
|
||||
if (!success)
|
||||
return;
|
||||
|
||||
channel._state = Channel::QUEUE_READ_SUCCEEDED;
|
||||
progress = true;
|
||||
return;
|
||||
}
|
||||
case Channel::QUEUE_READ_SUCCEEDED:
|
||||
{
|
||||
size_t nr_of_read_bytes { 0 };
|
||||
|
||||
Byte_range_ptr dst {
|
||||
(char *)req._ciphertext_blk_ptr, BLOCK_SIZE };
|
||||
|
||||
Read_result const result {
|
||||
channel._vfs_handle->fs().complete_read(
|
||||
channel._vfs_handle, dst, nr_of_read_bytes) };
|
||||
|
||||
switch (result) {
|
||||
case Read_result::READ_OK:
|
||||
|
||||
_mark_req_successful(channel, progress);
|
||||
return;
|
||||
|
||||
case Read_result::READ_QUEUED:
|
||||
case Read_result::READ_ERR_WOULD_BLOCK:
|
||||
|
||||
return;
|
||||
|
||||
case Read_result::READ_ERR_IO:
|
||||
case Read_result::READ_ERR_INVALID:
|
||||
|
||||
_mark_req_failed(channel, progress, "read ciphertext data");
|
||||
return;
|
||||
}
|
||||
}
|
||||
default:
|
||||
|
||||
return;
|
||||
case FILE_ERR: _mark_req_failed(progress, "file operation failed"); break;
|
||||
default: return;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Crypto::_execute_encrypt(Channel &channel,
|
||||
bool &progress)
|
||||
void Crypto_channel::_encrypt_client_data(bool &progress)
|
||||
{
|
||||
Request &req { channel._request };
|
||||
switch (channel._state) {
|
||||
case Channel::SUBMITTED:
|
||||
{
|
||||
channel._vfs_handle = _lookup_key_dir(req._key_id).encrypt_handle;
|
||||
channel._vfs_handle->seek(req._pba * BLOCK_SIZE);
|
||||
size_t nr_of_written_bytes { 0 };
|
||||
Request &req { *_req_ptr };
|
||||
switch (_state) {
|
||||
case REQ_SUBMITTED:
|
||||
|
||||
Const_byte_range_ptr src {
|
||||
(char *)req._plaintext_blk_ptr, BLOCK_SIZE };
|
||||
_generate_req<Client_data_request>(
|
||||
PLAINTEXT_BLK_OBTAINED, progress, Client_data_request::OBTAIN_PLAINTEXT_BLK,
|
||||
req._client_req_offset, req._client_req_tag, req._pba, req._vba, _blk);;
|
||||
break;
|
||||
|
||||
channel._vfs_handle->fs().write(
|
||||
channel._vfs_handle, src, nr_of_written_bytes);
|
||||
case PLAINTEXT_BLK_OBTAINED:
|
||||
|
||||
channel._state = Channel::OP_WRITTEN_TO_VFS_HANDLE;
|
||||
progress = true;
|
||||
return;
|
||||
}
|
||||
case Channel::OP_WRITTEN_TO_VFS_HANDLE:
|
||||
{
|
||||
channel._vfs_handle->seek(req._pba * BLOCK_SIZE);
|
||||
bool success {
|
||||
channel._vfs_handle->fs().queue_read(
|
||||
channel._vfs_handle, BLOCK_SIZE) };
|
||||
_key_dir(req._key_id)->encrypt_file.write(
|
||||
WRITE_OK, FILE_ERR, req._pba * BLOCK_SIZE, { (char *)&_blk, BLOCK_SIZE }, progress);
|
||||
break;
|
||||
|
||||
if (!success)
|
||||
return;
|
||||
case WRITE_OK:
|
||||
|
||||
channel._state = Channel::QUEUE_READ_SUCCEEDED;
|
||||
progress = true;
|
||||
return;
|
||||
}
|
||||
case Channel::QUEUE_READ_SUCCEEDED:
|
||||
{
|
||||
size_t nr_of_read_bytes { 0 };
|
||||
_key_dir(req._key_id)->encrypt_file.read(
|
||||
READ_OK, FILE_ERR, req._pba * BLOCK_SIZE, { (char *)&req._blk, BLOCK_SIZE }, progress);
|
||||
break;
|
||||
|
||||
Byte_range_ptr dst { (char *)req._ciphertext_blk_ptr, BLOCK_SIZE };
|
||||
|
||||
Read_result const result {
|
||||
channel._vfs_handle->fs().complete_read(
|
||||
channel._vfs_handle, dst, nr_of_read_bytes) };
|
||||
|
||||
switch (result) {
|
||||
case Read_result::READ_OK:
|
||||
|
||||
_mark_req_successful(channel, progress);
|
||||
return;
|
||||
|
||||
case Read_result::READ_QUEUED:
|
||||
case Read_result::READ_ERR_WOULD_BLOCK:
|
||||
|
||||
return;
|
||||
|
||||
case Read_result::READ_ERR_IO:
|
||||
case Read_result::READ_ERR_INVALID:
|
||||
|
||||
_mark_req_failed(channel, progress, "read ciphertext data");
|
||||
return;
|
||||
}
|
||||
}
|
||||
default:
|
||||
|
||||
return;
|
||||
case READ_OK: _mark_req_successful(progress); break;
|
||||
case FILE_ERR: _mark_req_failed(progress, "file operation"); break;
|
||||
default: break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Crypto::_execute_decrypt(Channel &channel,
|
||||
bool &progress)
|
||||
void Crypto_channel::_encrypt(bool &progress)
|
||||
{
|
||||
Request &req { channel._request };
|
||||
switch (channel._state) {
|
||||
case Channel::SUBMITTED:
|
||||
{
|
||||
channel._vfs_handle = _lookup_key_dir(req._key_id).decrypt_handle;
|
||||
channel._vfs_handle->seek(req._pba * BLOCK_SIZE);
|
||||
Request &req { *_req_ptr };
|
||||
switch (_state) {
|
||||
case REQ_SUBMITTED:
|
||||
|
||||
size_t nr_of_written_bytes { 0 };
|
||||
_key_dir(req._key_id)->encrypt_file.write(
|
||||
WRITE_OK, FILE_ERR, req._pba * BLOCK_SIZE, { (char *)&req._blk, BLOCK_SIZE }, progress);
|
||||
break;
|
||||
|
||||
Const_byte_range_ptr src {
|
||||
(char *)channel._request._ciphertext_blk_ptr, BLOCK_SIZE };
|
||||
case WRITE_OK:
|
||||
|
||||
channel._vfs_handle->fs().write(
|
||||
channel._vfs_handle, src, nr_of_written_bytes);
|
||||
_key_dir(req._key_id)->encrypt_file.read(
|
||||
READ_OK, FILE_ERR, req._pba * BLOCK_SIZE, { (char *)&req._blk, BLOCK_SIZE }, progress);
|
||||
break;
|
||||
|
||||
channel._state = Channel::OP_WRITTEN_TO_VFS_HANDLE;
|
||||
progress = true;
|
||||
return;
|
||||
}
|
||||
case Channel::OP_WRITTEN_TO_VFS_HANDLE:
|
||||
{
|
||||
channel._vfs_handle->seek(req._pba * BLOCK_SIZE);
|
||||
|
||||
bool success {
|
||||
channel._vfs_handle->fs().queue_read(
|
||||
channel._vfs_handle, BLOCK_SIZE) };
|
||||
|
||||
if (!success)
|
||||
return;
|
||||
|
||||
channel._state = Channel::QUEUE_READ_SUCCEEDED;
|
||||
progress = true;
|
||||
return;
|
||||
}
|
||||
case Channel::QUEUE_READ_SUCCEEDED:
|
||||
{
|
||||
size_t nr_of_read_bytes { 0 };
|
||||
Byte_range_ptr dst {
|
||||
(char *)req._plaintext_blk_ptr, BLOCK_SIZE };
|
||||
|
||||
Read_result const result {
|
||||
channel._vfs_handle->fs().complete_read(
|
||||
channel._vfs_handle, dst, nr_of_read_bytes) };
|
||||
|
||||
switch (result) {
|
||||
case Read_result::READ_OK:
|
||||
|
||||
_mark_req_successful(channel, progress);
|
||||
return;
|
||||
|
||||
case Read_result::READ_QUEUED:
|
||||
case Read_result::READ_ERR_WOULD_BLOCK:
|
||||
|
||||
return;
|
||||
|
||||
case Read_result::READ_ERR_IO:
|
||||
case Read_result::READ_ERR_INVALID:
|
||||
|
||||
_mark_req_failed(channel, progress, "read plaintext data");
|
||||
return;
|
||||
}
|
||||
return;
|
||||
}
|
||||
default:
|
||||
|
||||
return;
|
||||
case READ_OK: _mark_req_successful(progress); break;
|
||||
case FILE_ERR: _mark_req_failed(progress, "file operation"); break;
|
||||
default: break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Crypto::_execute_decrypt_client_data(Channel &channel,
|
||||
bool &progress)
|
||||
void Crypto_channel::_decrypt(bool &progress)
|
||||
{
|
||||
Request &req { channel._request };
|
||||
switch (channel._state) {
|
||||
case Channel::SUBMITTED:
|
||||
{
|
||||
channel._vfs_handle = _lookup_key_dir(req._key_id).decrypt_handle;
|
||||
if (channel._vfs_handle == nullptr) {
|
||||
_mark_req_failed(channel, progress, "lookup key dir");
|
||||
return;
|
||||
}
|
||||
channel._vfs_handle->seek(req._pba * BLOCK_SIZE);
|
||||
Request &req { *_req_ptr };
|
||||
switch (_state) {
|
||||
case REQ_SUBMITTED:
|
||||
|
||||
size_t nr_of_written_bytes { 0 };
|
||||
Const_byte_range_ptr src {
|
||||
(char *)channel._request._ciphertext_blk_ptr, BLOCK_SIZE };
|
||||
_key_dir(req._key_id)->decrypt_file.write(
|
||||
WRITE_OK, FILE_ERR, req._pba * BLOCK_SIZE, { (char *)&req._blk, BLOCK_SIZE }, progress);
|
||||
break;
|
||||
|
||||
channel._vfs_handle->fs().write(
|
||||
channel._vfs_handle, src, nr_of_written_bytes);
|
||||
case WRITE_OK:
|
||||
|
||||
channel._state = Channel::OP_WRITTEN_TO_VFS_HANDLE;
|
||||
progress = true;
|
||||
return;
|
||||
_key_dir(req._key_id)->decrypt_file.read(
|
||||
READ_OK, FILE_ERR, req._pba * BLOCK_SIZE, { (char *)&req._blk, BLOCK_SIZE }, progress);
|
||||
break;
|
||||
|
||||
case READ_OK: _mark_req_successful(progress); break;
|
||||
case FILE_ERR: _mark_req_failed(progress, "file operation"); break;
|
||||
default: break;
|
||||
}
|
||||
case Channel::OP_WRITTEN_TO_VFS_HANDLE:
|
||||
{
|
||||
channel._vfs_handle->seek(req._pba * BLOCK_SIZE);
|
||||
}
|
||||
|
||||
bool success {
|
||||
channel._vfs_handle->fs().queue_read(
|
||||
channel._vfs_handle, BLOCK_SIZE) };
|
||||
|
||||
if (!success)
|
||||
return;
|
||||
void Crypto_channel::_decrypt_client_data(bool &progress)
|
||||
{
|
||||
Request &req { *_req_ptr };
|
||||
switch (_state) {
|
||||
case REQ_SUBMITTED:
|
||||
|
||||
channel._state = Channel::QUEUE_READ_SUCCEEDED;
|
||||
progress = true;
|
||||
return;
|
||||
_key_dir(req._key_id)->decrypt_file.write(
|
||||
WRITE_OK, FILE_ERR, req._pba * BLOCK_SIZE, { (char *)&req._blk, BLOCK_SIZE }, progress);
|
||||
break;
|
||||
|
||||
case WRITE_OK:
|
||||
|
||||
_key_dir(req._key_id)->decrypt_file.read(
|
||||
READ_OK, FILE_ERR, req._pba * BLOCK_SIZE, { (char *)&_blk, BLOCK_SIZE }, progress);
|
||||
break;
|
||||
|
||||
case READ_OK:
|
||||
|
||||
_generate_req<Client_data_request>(
|
||||
PLAINTEXT_BLK_SUPPLIED, progress, Client_data_request::SUPPLY_PLAINTEXT_BLK,
|
||||
req._client_req_offset, req._client_req_tag, req._pba, req._vba, _blk);;
|
||||
break;
|
||||
|
||||
case PLAINTEXT_BLK_SUPPLIED: _mark_req_successful(progress); break;
|
||||
case FILE_ERR: _mark_req_failed(progress, "file operation failed"); break;
|
||||
default: break;
|
||||
}
|
||||
case Channel::QUEUE_READ_SUCCEEDED:
|
||||
{
|
||||
size_t nr_of_read_bytes { 0 };
|
||||
Byte_range_ptr dst {
|
||||
(char *)&channel._blk_buf, BLOCK_SIZE };
|
||||
}
|
||||
|
||||
Read_result const result {
|
||||
channel._vfs_handle->fs().complete_read(
|
||||
channel._vfs_handle, dst, nr_of_read_bytes) };
|
||||
|
||||
switch (result) {
|
||||
case Read_result::READ_OK:
|
||||
void Crypto_channel::_request_submitted(Module_request &mod_req)
{
	_req_ptr = static_cast<Request *>(&mod_req);
	_state = REQ_SUBMITTED;
}
|
||||
|
||||
channel._state = Channel::SUPPLY_PLAINTEXT_BLK_PENDING;
|
||||
progress = true;
|
||||
return;
|
||||
|
||||
case Read_result::READ_QUEUED:
|
||||
case Read_result::READ_ERR_WOULD_BLOCK:
|
||||
|
||||
return;
|
||||
|
||||
case Read_result::READ_ERR_IO:
|
||||
case Read_result::READ_ERR_INVALID:
|
||||
|
||||
_mark_req_failed(channel, progress, "read plaintext data");
|
||||
return;
|
||||
}
|
||||
return;
|
||||
}
|
||||
case Channel::SUPPLY_PLAINTEXT_BLK_COMPLETE:
|
||||
|
||||
if (!channel._generated_req_success) {
|
||||
|
||||
_mark_req_failed(channel, progress, "supply plaintext block");
|
||||
return;
|
||||
}
|
||||
_mark_req_successful(channel, progress);
|
||||
void Crypto_channel::execute(bool &progress)
|
||||
{
|
||||
if (!_req_ptr)
|
||||
return;
|
||||
|
||||
default:
|
||||
|
||||
return;
|
||||
switch (_req_ptr->_type) {
|
||||
case Request::ADD_KEY: _add_key(progress); break;
|
||||
case Request::REMOVE_KEY: _remove_key(progress); break;
|
||||
case Request::DECRYPT: _decrypt(progress); break;
|
||||
case Request::ENCRYPT: _encrypt(progress); break;
|
||||
case Request::DECRYPT_CLIENT_DATA: _decrypt_client_data(progress); break;
|
||||
case Request::ENCRYPT_CLIENT_DATA: _encrypt_client_data(progress); break;
|
||||
}
|
||||
}
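
Several of the channel methods above generate a sub-request together with the state at which the channel should continue once the sub-request has completed (for example PLAINTEXT_BLK_SUPPLIED in _decrypt_client_data). Below is a minimal sketch of that generate-and-continue idea; the names are hypothetical and a std::function stands in for the module framework's completion path.

#include <functional>
#include <cstdio>

class Channel
{
	public:

		enum State { REQ_SUBMITTED, SUB_REQ_DONE, REQ_COMPLETE };

	private:

		State _state { REQ_SUBMITTED };
		std::function<void()> _deliver_completion { };

		void _generate_sub_req(State continue_at, bool &progress)
		{
			/* the completion handler only restores the continuation state */
			_deliver_completion = [this, continue_at] { _state = continue_at; };
			progress = true;
		}

	public:

		void sub_req_completed() { if (_deliver_completion) _deliver_completion(); }

		void execute(bool &progress)
		{
			switch (_state) {
			case REQ_SUBMITTED: _generate_sub_req(SUB_REQ_DONE, progress); break;
			case SUB_REQ_DONE:  _state = REQ_COMPLETE; progress = true;    break;
			default: break;
			}
		}

		State state() const { return _state; }
};

int main()
{
	Channel chan { };
	bool progress { false };

	chan.execute(progress);     /* generates the sub-request               */
	chan.sub_req_completed();   /* target module signals completion        */
	chan.execute(progress);     /* resumes at the caller-chosen next state */

	std::printf("final state: %d\n", (int)chan.state());
}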
|
||||
|
||||
|
||||
void Crypto::execute(bool &progress)
|
||||
{
|
||||
for (Channel &channel : _channels) {
|
||||
|
||||
if (channel._state == Channel::INACTIVE)
|
||||
continue;
|
||||
|
||||
switch (channel._request._type) {
|
||||
case Request::ADD_KEY: _execute_add_key(channel, progress); break;
|
||||
case Request::REMOVE_KEY: _execute_remove_key(channel, progress); break;
|
||||
case Request::DECRYPT: _execute_decrypt(channel, progress); break;
|
||||
case Request::ENCRYPT: _execute_encrypt(channel, progress); break;
|
||||
case Request::DECRYPT_CLIENT_DATA: _execute_decrypt_client_data(channel, progress); break;
|
||||
case Request::ENCRYPT_CLIENT_DATA: _execute_encrypt_client_data(channel, progress); break;
|
||||
default:
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
}
|
||||
for_each_channel<Channel>([&] (Channel &chan) {
|
||||
chan.execute(progress); });
|
||||
}
|
||||
|
||||
|
||||
Crypto::Crypto(Vfs::Env &vfs_env,
|
||||
Xml_node const &xml_node)
|
||||
Crypto_channel::Crypto_channel(Module_channel_id id, Vfs::Env &vfs_env, Xml_node const &xml_node)
|
||||
:
|
||||
_vfs_env { vfs_env },
|
||||
_path { xml_node.attribute_value("path", String<32>()) },
|
||||
_add_key_handle { vfs_open_wo(_vfs_env, { _path.string(), "/add_key" }) },
|
||||
_remove_key_handle { vfs_open_wo(_vfs_env, { _path.string(), "/remove_key" }) }
|
||||
Module_channel { CRYPTO, id }, _vfs_env { vfs_env }, _path { xml_node.attribute_value("path", Tresor::Path()) }
|
||||
{ }
|
||||
|
||||
|
||||
void Crypto::generated_request_complete(Module_request &mod_req)
|
||||
Crypto::Crypto(Vfs::Env &vfs_env, Xml_node const &xml_node)
|
||||
{
|
||||
Module_request_id const id { mod_req.src_request_id() };
|
||||
if (id >= NR_OF_CHANNELS) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
switch (mod_req.dst_module_id()) {
|
||||
case CLIENT_DATA:
|
||||
{
|
||||
Client_data_request const &gen_req { *static_cast<Client_data_request *>(&mod_req) };
|
||||
switch (_channels[id]._state) {
|
||||
case Channel::OBTAIN_PLAINTEXT_BLK_IN_PROGRESS:
|
||||
_channels[id]._state = Channel::OBTAIN_PLAINTEXT_BLK_COMPLETE;
|
||||
_channels[id]._generated_req_success = gen_req.success();
|
||||
break;
|
||||
case Channel::SUPPLY_PLAINTEXT_BLK_IN_PROGRESS:
|
||||
_channels[id]._state = Channel::SUPPLY_PLAINTEXT_BLK_COMPLETE;
|
||||
_channels[id]._generated_req_success = gen_req.success();
|
||||
break;
|
||||
default:
|
||||
class Exception_2 { };
|
||||
throw Exception_2 { };
|
||||
}
|
||||
break;
|
||||
}
|
||||
default:
|
||||
class Exception_3 { };
|
||||
throw Exception_3 { };
|
||||
Module_channel_id id { 0 };
|
||||
for (Constructible<Channel> &chan : _channels) {
|
||||
chan.construct(id++, vfs_env, xml_node);
|
||||
add_channel(*chan);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
bool Crypto::_peek_completed_request(uint8_t *buf_ptr,
|
||||
size_t buf_size)
|
||||
{
|
||||
for (Channel &channel : _channels) {
|
||||
if (channel._state == Channel::COMPLETE) {
|
||||
if (sizeof(channel._request) > buf_size) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
memcpy(buf_ptr, &channel._request, sizeof(channel._request));
|
||||
|
||||
Request &req { channel._request };
|
||||
if (VERBOSE_WRITE_VBA && req._type == Request::ENCRYPT_CLIENT_DATA) {
|
||||
|
||||
Hash hash { };
|
||||
calc_sha256_4k_hash(*(Block *)channel._blk_buf, hash);
|
||||
log(" encrypt leaf data: plaintext ", *(Block *)channel._blk_buf, " hash ", hash);
|
||||
log(" update branch:");
|
||||
log(" ", Branch_lvl_prefix("leaf data: "), *(Block *)req._ciphertext_blk_ptr);
|
||||
}
|
||||
if (VERBOSE_READ_VBA && req._type == Request::DECRYPT_CLIENT_DATA) {
|
||||
|
||||
Hash hash { };
|
||||
calc_sha256_4k_hash(*(Block *)channel._blk_buf, hash);
|
||||
log(" ", Branch_lvl_prefix("leaf data: "), *(Block *)req._ciphertext_blk_ptr);
|
||||
log(" decrypt leaf data: plaintext ", *(Block *)channel._blk_buf, " hash ", hash);
|
||||
}
|
||||
if (VERBOSE_CRYPTO) {
|
||||
|
||||
switch (req._type) {
|
||||
case Request::DECRYPT_CLIENT_DATA:
|
||||
case Request::ENCRYPT_CLIENT_DATA:
|
||||
{
|
||||
log("crypto: ", req.type_to_string(req._type),
|
||||
" pba ", req._pba,
|
||||
" vba ", req._vba,
|
||||
" plain ", *(Block *)channel._blk_buf,
|
||||
" cipher ", *(Block *)req._ciphertext_blk_ptr);
|
||||
|
||||
break;
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
void Crypto::_drop_completed_request(Module_request &req)
|
||||
{
|
||||
Module_request_id id { 0 };
|
||||
id = req.dst_request_id();
|
||||
if (id >= NR_OF_CHANNELS) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
if (_channels[id]._state != Channel::COMPLETE) {
|
||||
class Exception_2 { };
|
||||
throw Exception_2 { };
|
||||
}
|
||||
_channels[id]._state = Channel::INACTIVE;
|
||||
}
|
||||
|
||||
|
||||
bool Crypto::ready_to_submit_request()
|
||||
{
|
||||
for (Channel &channel : _channels) {
|
||||
if (channel._state == Channel::INACTIVE)
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
void Crypto::submit_request(Module_request &req)
|
||||
{
|
||||
for (Module_request_id id { 0 }; id < NR_OF_CHANNELS; id++) {
|
||||
if (_channels[id]._state == Channel::INACTIVE) {
|
||||
req.dst_request_id(id);
|
||||
_channels[id]._request = *static_cast<Request *>(&req);
|
||||
_channels[id]._state = Channel::SUBMITTED;
|
||||
return;
|
||||
}
|
||||
}
|
||||
class Invalid_call { };
|
||||
throw Invalid_call { };
|
||||
}
|
||||
|
File diff suppressed because it is too large
@@ -11,493 +11,172 @@
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
/* base includes */
|
||||
#include <base/log.h>
|
||||
|
||||
/* tresor includes */
|
||||
#include <tresor/ft_check.h>
|
||||
#include <tresor/block_io.h>
|
||||
#include <tresor/sha256_4k_hash.h>
|
||||
#include <tresor/hash.h>
|
||||
|
||||
using namespace Tresor;
|
||||
|
||||
|
||||
/**********************
|
||||
** Ft_check_request **
|
||||
**********************/
|
||||
|
||||
Ft_check_request::Ft_check_request(uint64_t src_module_id,
|
||||
uint64_t src_request_id,
|
||||
Type type,
|
||||
Tree_level_index max_lvl,
|
||||
Tree_node_index max_child_idx,
|
||||
Number_of_leaves nr_of_leaves,
|
||||
Type_1_node root)
|
||||
Ft_check_request::Ft_check_request(Module_id src_mod, Module_channel_id src_chan, Tree_root const &ft, bool &success)
|
||||
:
|
||||
Module_request { src_module_id, src_request_id, FT_CHECK },
|
||||
_type { type },
|
||||
_max_lvl { max_lvl },
|
||||
_max_child_idx { max_child_idx },
|
||||
_nr_of_leaves { nr_of_leaves },
|
||||
_root { root }
|
||||
Module_request { src_mod, src_chan, FT_CHECK }, _ft { ft }, _success { success }
|
||||
{ }
|
||||
|
||||
|
||||
char const *Ft_check_request::type_to_string(Type type)
|
||||
bool Ft_check_channel::_execute_node(Tree_level_index lvl, Tree_node_index node_idx, bool &progress)
|
||||
{
|
||||
switch (type) {
|
||||
case INVALID: return "invalid";
|
||||
case CHECK: return "check";
|
||||
}
|
||||
return "?";
|
||||
}
|
||||
bool &check_node { _check_node[lvl][node_idx] };
|
||||
|
||||
if (check_node == false)
|
||||
return false;
|
||||
|
||||
/**************
|
||||
** Ft_check **
|
||||
**************/
|
||||
Request &req { *_req_ptr };
|
||||
switch (_state) {
|
||||
case REQ_IN_PROGRESS:
|
||||
|
||||
void Ft_check::_execute_inner_t2_child(Channel &chan,
|
||||
Tree_level_index lvl,
|
||||
Tree_node_index child_idx,
|
||||
bool &progress)
|
||||
{
|
||||
Request &req { chan._request };
|
||||
Child_state &child_state { chan._t1_lvls[lvl].children_state[child_idx] };
|
||||
Type_1_node const &child { chan._t1_lvls[lvl].children.nodes[child_idx] };
|
||||
Type_2_level &child_lvl { chan._t2_lvl };
|
||||
|
||||
if (child_state == Channel::READ_BLOCK) {
|
||||
|
||||
if (!child.valid()) {
|
||||
|
||||
if (chan._nr_of_leaves == 0) {
|
||||
|
||||
child_state = Channel::DONE;
|
||||
if (lvl == 1) {
|
||||
Type_2_node const &node { _t2_blk.nodes[node_idx] };
|
||||
if (!_num_remaining_leaves) {
|
||||
if (node.valid()) {
|
||||
_mark_req_failed(progress, { "lvl ", lvl, " node ", node_idx, " (", node,
|
||||
") valid but no leaves remaining" });
|
||||
break;
|
||||
}
|
||||
check_node = false;
|
||||
progress = true;
|
||||
|
||||
if (VERBOSE_CHECK)
|
||||
log(Level_indent { lvl, req._max_lvl },
|
||||
" lvl ", lvl, " child ", child_idx, " unused");
|
||||
|
||||
} else {
|
||||
|
||||
if (VERBOSE_CHECK)
|
||||
log(Level_indent { lvl, req._max_lvl },
|
||||
" lvl ", lvl, " child ", child_idx, " unexpectedly in use");
|
||||
|
||||
_mark_req_failed(chan, progress, "check for valid child");
|
||||
log(Level_indent { lvl, req._ft.max_lvl }, " lvl ", lvl, " node ", node_idx, " unused");
|
||||
break;
|
||||
}
|
||||
|
||||
} else if (!chan._gen_prim.valid()) {
|
||||
|
||||
chan._gen_prim = {
|
||||
.success = false,
|
||||
.tag = Channel::BLOCK_IO,
|
||||
.blk_nr = child.pba,
|
||||
.dropped = false };
|
||||
|
||||
chan._lvl_to_read = lvl - 1;
|
||||
_num_remaining_leaves--;
|
||||
check_node = false;
|
||||
progress = true;
|
||||
|
||||
if (VERBOSE_CHECK)
|
||||
log(Level_indent { lvl, req._max_lvl },
|
||||
" lvl ", lvl, " child ", child_idx, " (", child, "): load to lvl ", lvl - 1);
|
||||
|
||||
} else if (chan._gen_prim.tag != Channel::BLOCK_IO ||
|
||||
chan._gen_prim.blk_nr != child.pba) {
|
||||
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
|
||||
} else if (!chan._gen_prim.success) {
|
||||
|
||||
log(Level_indent { lvl, req._ft.max_lvl }, " lvl ", lvl, " node ", node_idx, " done");
|
||||
} else {
|
||||
|
||||
for (Child_state &state : child_lvl.children_state) {
|
||||
state = Channel::READ_BLOCK;
|
||||
}
|
||||
chan._gen_prim = { };
|
||||
child_state = Channel::CHECK_HASH;
|
||||
progress = true;
|
||||
}
|
||||
|
||||
} else if (child_state == Channel::CHECK_HASH) {
|
||||
|
||||
Block blk { };
|
||||
child_lvl.children.encode_to_blk(blk);
|
||||
|
||||
if (child.gen == INITIAL_GENERATION ||
|
||||
check_sha256_4k_hash(blk, child.hash)) {
|
||||
|
||||
child_state = Channel::DONE;
|
||||
progress = true;
|
||||
|
||||
if (VERBOSE_CHECK)
|
||||
log(Level_indent { lvl, req._max_lvl },
|
||||
" lvl ", lvl, " child ", child_idx, " has good hash");
|
||||
|
||||
} else {
|
||||
|
||||
if (VERBOSE_CHECK)
|
||||
log(Level_indent { lvl, req._max_lvl },
|
||||
" lvl ", lvl, " child ", child_idx, " has bad hash");
|
||||
|
||||
_mark_req_failed(chan, progress, "check inner hash");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Ft_check::_execute_inner_t1_child(Channel &chan,
|
||||
Type_1_node const &child,
|
||||
Type_1_level &child_lvl,
|
||||
Child_state &child_state,
|
||||
Tree_level_index lvl,
|
||||
Tree_node_index child_idx,
|
||||
bool &progress)
|
||||
{
|
||||
Request &req { chan._request };
|
||||
if (child_state == Channel::READ_BLOCK) {
|
||||
|
||||
if (!child.valid()) {
|
||||
|
||||
if (chan._nr_of_leaves == 0) {
|
||||
|
||||
child_state = Channel::DONE;
|
||||
Type_1_node const &node { _t1_blks.items[lvl].nodes[node_idx] };
|
||||
if (!node.valid()) {
|
||||
if (_num_remaining_leaves) {
|
||||
_mark_req_failed(progress, { "lvl ", lvl, " node ", node_idx, " invalid but ",
|
||||
_num_remaining_leaves, " leaves remaining" });
|
||||
break;
|
||||
}
|
||||
check_node = false;
|
||||
progress = true;
|
||||
|
||||
if (VERBOSE_CHECK)
|
||||
log(Level_indent { lvl, req._max_lvl },
|
||||
" lvl ", lvl, " child ", child_idx, " unused");
|
||||
|
||||
} else {
|
||||
|
||||
if (VERBOSE_CHECK)
|
||||
log(Level_indent { lvl, req._max_lvl },
|
||||
" lvl ", lvl, " child ", child_idx, " unexpectedly in use");
|
||||
|
||||
_mark_req_failed(chan, progress, "check for valid child");
|
||||
log(Level_indent { lvl, req._ft.max_lvl }, " lvl ", lvl, " node ", node_idx, " unused");
|
||||
break;
|
||||
}
|
||||
|
||||
} else if (!chan._gen_prim.valid()) {
|
||||
|
||||
chan._gen_prim = {
|
||||
.success = false,
|
||||
.tag = Channel::BLOCK_IO,
|
||||
.blk_nr = child.pba,
|
||||
.dropped = false };
|
||||
|
||||
chan._lvl_to_read = lvl - 1;
|
||||
progress = true;
|
||||
|
||||
_generate_req<Block_io::Read>(READ_BLK_SUCCEEDED, progress, node.pba, _blk);
|
||||
if (VERBOSE_CHECK)
|
||||
log(Level_indent { lvl, req._max_lvl },
|
||||
" lvl ", lvl, " child ", child_idx, " (", child, "): load to lvl ", lvl - 1);
|
||||
|
||||
} else if (chan._gen_prim.tag != Channel::BLOCK_IO ||
|
||||
chan._gen_prim.blk_nr != child.pba) {
|
||||
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
|
||||
} else if (!chan._gen_prim.success) {
|
||||
|
||||
} else {
|
||||
|
||||
for (Child_state &state : child_lvl.children_state) {
|
||||
state = Channel::READ_BLOCK;
|
||||
}
|
||||
chan._gen_prim = { };
|
||||
child_state = Channel::CHECK_HASH;
|
||||
progress = true;
|
||||
log(Level_indent { lvl, req._ft.max_lvl }, " lvl ", lvl, " node ", node_idx,
|
||||
" (", node, "): load to lvl ", lvl - 1);
|
||||
}
|
||||
break;
|
||||
|
||||
} else if (child_state == Channel::CHECK_HASH) {
|
||||
|
||||
Block blk { };
|
||||
child_lvl.children.encode_to_blk(blk);
|
||||
|
||||
if (child.gen == INITIAL_GENERATION ||
|
||||
check_sha256_4k_hash(blk, child.hash)) {
|
||||
|
||||
child_state = Channel::DONE;
|
||||
if (&child_state == &chan._root_state) {
|
||||
chan._request._success = true;
|
||||
}
|
||||
progress = true;
|
||||
|
||||
if (VERBOSE_CHECK)
|
||||
log(Level_indent { lvl, req._max_lvl },
|
||||
" lvl ", lvl, " child ", child_idx, " has good hash");
|
||||
|
||||
} else {
|
||||
|
||||
if (VERBOSE_CHECK)
|
||||
log(Level_indent { lvl, req._max_lvl },
|
||||
" lvl ", lvl, " child ", child_idx, " has bad hash");
|
||||
|
||||
_mark_req_failed(chan, progress, "check inner hash");
|
||||
case READ_BLK_SUCCEEDED:
|
||||
{
|
||||
Type_1_node const &node { _t1_blks.items[lvl].nodes[node_idx] };
|
||||
if (node.gen != INITIAL_GENERATION && !check_hash(_blk, node.hash)) {
|
||||
_mark_req_failed(progress, { "lvl ", lvl, " node ", node_idx, " (", node, ") has bad hash" });
|
||||
break;
|
||||
}
|
||||
if (lvl == 2)
|
||||
_t2_blk.decode_from_blk(_blk);
|
||||
else
|
||||
_t1_blks.items[lvl - 1].decode_from_blk(_blk);
|
||||
for (bool &cn : _check_node[lvl - 1])
|
||||
cn = true;
|
||||
|
||||
_state = REQ_IN_PROGRESS;
|
||||
check_node = false;
|
||||
progress = true;
|
||||
if (VERBOSE_CHECK)
|
||||
log(Level_indent { lvl, req._ft.max_lvl }, " lvl ", lvl, " node ", node_idx, " has good hash");
|
||||
break;
|
||||
}
|
||||
default: break;
|
||||
}
|
||||
return true;
|
||||
}
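
In the hash check above, a node whose generation equals INITIAL_GENERATION is accepted without comparing hashes; only nodes of a later generation must match the stored hash of the read block. A toy sketch of that rule; the hash function and node layout are placeholders, not the tresor hashing interface.

#include <cstdint>
#include <cstdio>

enum : std::uint64_t { INITIAL_GENERATION = 0 };

struct Node { std::uint64_t gen; std::uint32_t hash; };

/* toy hash, standing in for the real hashing back end */
static std::uint32_t calc_hash(std::uint32_t payload) { return payload ^ 0xa5a5a5a5u; }

static bool node_ok(Node const &node, std::uint32_t payload)
{
	/* a node still at the initial generation is accepted as-is */
	if (node.gen == INITIAL_GENERATION)
		return true;

	return node.hash == calc_hash(payload);
}

int main()
{
	Node const fresh { INITIAL_GENERATION, 0 };
	Node const used  { 7, calc_hash(1234) };

	std::printf("fresh ok: %d, used ok: %d, corrupted: %d\n",
	            node_ok(fresh, 0), node_ok(used, 1234), node_ok(used, 999));
}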
|
||||
|
||||
|
||||
void Ft_check::_execute_leaf_child(Channel &chan,
|
||||
Tree_node_index child_idx,
|
||||
bool &progress)
|
||||
void Ft_check_channel::execute(bool &progress)
|
||||
{
|
||||
Request &req { chan._request };
|
||||
Type_2_node const &child { chan._t2_lvl.children.nodes[child_idx] };
|
||||
Child_state &child_state { chan._t2_lvl.children_state[child_idx] };
|
||||
if (!_req_ptr)
|
||||
return;
|
||||
|
||||
if (child_state == Channel::READ_BLOCK) {
|
||||
if (_state == REQ_SUBMITTED) {
|
||||
for (Tree_level_index lvl { 1 }; lvl <= _req_ptr->_ft.max_lvl + 1; lvl++)
|
||||
for (Tree_node_index node_idx { 0 }; node_idx < _req_ptr->_ft.degree; node_idx++)
|
||||
_check_node[lvl][node_idx] = false;
|
||||
|
||||
if (chan._nr_of_leaves == 0) {
|
||||
|
||||
if (child.valid()) {
|
||||
|
||||
if (VERBOSE_CHECK)
|
||||
log(Level_indent { 1, req._max_lvl },
|
||||
" lvl 1 child ", child_idx, " unexpectedly in use");
|
||||
|
||||
_mark_req_failed(chan, progress, "check for unused child");
|
||||
|
||||
} else {
|
||||
|
||||
child_state = Channel::DONE;
|
||||
progress = true;
|
||||
|
||||
if (VERBOSE_CHECK)
|
||||
log(Level_indent { 1, req._max_lvl },
|
||||
" lvl 1 child ", child_idx, " unused");
|
||||
}
|
||||
|
||||
} else {
|
||||
|
||||
chan._nr_of_leaves--;
|
||||
child_state = Channel::DONE;
|
||||
progress = true;
|
||||
|
||||
if (VERBOSE_CHECK)
|
||||
log(Level_indent { 1, req._max_lvl },
|
||||
" lvl 1 child ", child_idx, " done");
|
||||
|
||||
}
|
||||
_num_remaining_leaves = _req_ptr->_ft.num_leaves;
|
||||
_t1_blks.items[_req_ptr->_ft.max_lvl + 1].nodes[0] = _req_ptr->_ft.t1_node();
|
||||
_check_node[_req_ptr->_ft.max_lvl + 1][0] = true;
|
||||
_state = REQ_IN_PROGRESS;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Ft_check::_execute_check(Channel &chan,
|
||||
bool &progress)
|
||||
{
|
||||
Request &req { chan._request };
|
||||
for (Tree_node_index child_idx { 0 };
|
||||
child_idx <= req._max_child_idx;
|
||||
child_idx++) {
|
||||
|
||||
if (chan._t2_lvl.children_state[child_idx] != Channel::DONE) {
|
||||
|
||||
_execute_leaf_child(chan, child_idx, progress);
|
||||
return;
|
||||
}
|
||||
}
|
||||
for (Tree_level_index lvl { FT_LOWEST_T1_LVL }; lvl <= req._max_lvl; lvl++) {
|
||||
|
||||
for (Tree_node_index child_idx { 0 };
|
||||
child_idx <= req._max_child_idx;
|
||||
child_idx++) {
|
||||
|
||||
Type_1_level &t1_lvl { chan._t1_lvls[lvl] };
|
||||
if (t1_lvl.children_state[child_idx] != Channel::DONE) {
|
||||
|
||||
if (lvl == FT_LOWEST_T1_LVL)
|
||||
_execute_inner_t2_child(
|
||||
chan, lvl, child_idx, progress);
|
||||
else
|
||||
_execute_inner_t1_child(
|
||||
chan,
|
||||
chan._t1_lvls[lvl].children.nodes[child_idx],
|
||||
chan._t1_lvls[lvl - 1],
|
||||
chan._t1_lvls[lvl].children_state[child_idx],
|
||||
lvl, child_idx, progress);
|
||||
|
||||
for (Tree_level_index lvl { 1 }; lvl <= _req_ptr->_ft.max_lvl + 1; lvl++)
|
||||
for (Tree_node_index node_idx { 0 }; node_idx < _req_ptr->_ft.degree; node_idx++)
|
||||
if (_execute_node(lvl, node_idx, progress))
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (chan._root_state != Channel::DONE) {
|
||||
|
||||
_execute_inner_t1_child(
|
||||
chan, req._root, chan._t1_lvls[req._max_lvl], chan._root_state,
|
||||
req._max_lvl + 1, 0, progress);
|
||||
_mark_req_successful(progress);
|
||||
}


void Ft_check_channel::_generated_req_completed(State_uint state_uint)
{
	if (!_generated_req_success) {
		error("ft check: request (", *_req_ptr, ") failed because generated request failed)");
		_req_ptr->_success = false;
		_state = REQ_COMPLETE;
		_req_ptr = nullptr;
		return;
	}
	_state = (State)state_uint;
}
|
||||
|
||||
|
||||
void Ft_check::_mark_req_failed(Channel &chan,
|
||||
bool &progress,
|
||||
char const *str)
|
||||
void Ft_check_channel::_mark_req_failed(bool &progress, Error_string str)
|
||||
{
|
||||
error("ft check: request (", chan._request, ") failed at step \"", str, "\"");
|
||||
chan._request._success = false;
|
||||
chan._root_state = Channel::DONE;
|
||||
error("ft check request (", *_req_ptr, ") failed: ", str);
|
||||
_req_ptr->_success = false;
|
||||
_state = REQ_COMPLETE;
|
||||
_req_ptr = nullptr;
|
||||
progress = true;
|
||||
}
|
||||
|
||||
|
||||
bool Ft_check::_peek_completed_request(uint8_t *buf_ptr,
|
||||
size_t buf_size)
|
||||
void Ft_check_channel::_mark_req_successful(bool &progress)
|
||||
{
|
||||
for (Channel &chan : _channels) {
|
||||
|
||||
if (chan._request._type != Request::INVALID &&
|
||||
chan._root_state == Channel::DONE) {
|
||||
|
||||
if (sizeof(chan._request) > buf_size) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
memcpy(buf_ptr, &chan._request, sizeof(chan._request));
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
_req_ptr->_success = true;
|
||||
_state = REQ_COMPLETE;
|
||||
_req_ptr = nullptr;
|
||||
progress = true;
|
||||
}
|
||||
|
||||
|
||||
void Ft_check::_drop_completed_request(Module_request &req)
|
||||
void Ft_check_channel::_request_submitted(Module_request &mod_req)
|
||||
{
|
||||
Module_request_id id { 0 };
|
||||
id = req.dst_request_id();
|
||||
if (id >= NR_OF_CHANNELS) {
|
||||
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
Channel &chan { _channels[id] };
|
||||
if (chan._request._type == Request::INVALID &&
|
||||
chan._root_state != Channel::DONE) {
|
||||
|
||||
class Exception_2 { };
|
||||
throw Exception_2 { };
|
||||
}
|
||||
chan = Channel { };
|
||||
_req_ptr = static_cast<Request *>(&mod_req);
|
||||
_state = REQ_SUBMITTED;
|
||||
}
|
||||
|
||||
|
||||
bool Ft_check::_peek_generated_request(uint8_t *buf_ptr,
|
||||
size_t buf_size)
|
||||
Ft_check::Ft_check()
|
||||
{
|
||||
for (Module_request_id id { 0 }; id < NR_OF_CHANNELS; id++) {
|
||||
|
||||
Channel &chan { _channels[id] };
|
||||
|
||||
if (!chan._gen_prim.valid() || chan._gen_prim.dropped)
|
||||
continue;
|
||||
|
||||
switch (chan._gen_prim.tag) {
|
||||
case Channel::BLOCK_IO:
|
||||
|
||||
construct_in_buf<Block_io_request>(
|
||||
buf_ptr, buf_size, FT_CHECK, id,
|
||||
Block_io_request::READ, 0, 0, 0,
|
||||
chan._gen_prim.blk_nr, 0, 1,
|
||||
(void *)&chan._encoded_blk, nullptr);
|
||||
|
||||
return true;
|
||||
|
||||
default:
|
||||
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
Module_channel_id id { 0 };
|
||||
for (Constructible<Channel> &chan : _channels) {
|
||||
chan.construct(id++);
|
||||
add_channel(*chan);
|
||||
}
|
||||
return false;
|
||||
}
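/*
 * Sketch, not part of the patch: the constructor pattern above (construct
 * each channel with its id and register it at the module) is common to the
 * reworked modules; the same loop appears again in Ft_initializer below.
 * Example_module and its _channels member are hypothetical names.
 */
Example_module::Example_module()
{
	Module_channel_id id { 0 };
	for (Constructible<Channel> &chan : _channels) {
		chan.construct(id++);   /* give the channel its stable id */
		add_channel(*chan);     /* register it at the module framework */
	}
}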
|
||||
|
||||
|
||||
void Ft_check::_drop_generated_request(Module_request &req)
|
||||
{
|
||||
Module_request_id const id { req.src_request_id() };
|
||||
if (id >= NR_OF_CHANNELS) {
|
||||
class Exception_0 { };
|
||||
throw Exception_0 { };
|
||||
}
|
||||
Channel &chan { _channels[id] };
|
||||
chan._gen_prim.dropped = true;
|
||||
}
|
||||
|
||||
|
||||
void Ft_check::generated_request_complete(Module_request &mod_req)
|
||||
{
|
||||
Module_request_id const id { mod_req.src_request_id() };
|
||||
if (id >= NR_OF_CHANNELS) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
Channel &chan { _channels[id] };
|
||||
switch (mod_req.dst_module_id()) {
|
||||
case BLOCK_IO:
|
||||
{
|
||||
Block_io_request &gen_req { *static_cast<Block_io_request*>(&mod_req) };
|
||||
chan._gen_prim.success = gen_req.success();
|
||||
if (chan._lvl_to_read == 1)
|
||||
chan._t2_lvl.children.decode_from_blk(chan._encoded_blk);
|
||||
else
|
||||
chan._t1_lvls[chan._lvl_to_read].children.decode_from_blk(chan._encoded_blk);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
class Exception_8 { };
|
||||
throw Exception_8 { };
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
bool Ft_check::ready_to_submit_request()
|
||||
{
|
||||
for (Channel &chan : _channels) {
|
||||
if (chan._request._type == Request::INVALID)
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
void Ft_check::submit_request(Module_request &req)
|
||||
{
|
||||
for (Module_request_id id { 0 }; id < NR_OF_CHANNELS; id++) {
|
||||
Channel &chan { _channels[id] };
|
||||
if (chan._request._type == Request::INVALID) {
|
||||
req.dst_request_id(id);
|
||||
chan._request = *static_cast<Request *>(&req);
|
||||
chan._nr_of_leaves = chan._request._nr_of_leaves;
|
||||
chan._root_state = Channel::READ_BLOCK;
|
||||
return;
|
||||
}
|
||||
}
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
|
||||
|
||||
void Ft_check::execute(bool &progress)
|
||||
{
|
||||
for (Channel &chan : _channels) {
|
||||
|
||||
Request &req { chan._request };
|
||||
switch (req._type) {
|
||||
case Request::CHECK:
|
||||
|
||||
_execute_check(chan, progress);
|
||||
break;
|
||||
|
||||
default:
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
for_each_channel<Channel>([&] (Channel &chan) {
|
||||
chan.execute(progress); });
|
||||
}
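/*
 * Sketch, not part of the patch: with per-channel state machines in place, a
 * module's execute() merely forwards to its channels, exactly as
 * Ft_check::execute above and Ft_initializer::execute below do.
 * Example_module is a hypothetical name.
 */
void Example_module::execute(bool &progress)
{
	/* each channel advances its own request and raises the progress flag */
	for_each_channel<Channel>([&] (Channel &chan) {
		chan.execute(progress); });
}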
|
||||
|
@@ -1,5 +1,6 @@
|
||||
/*
|
||||
* \brief Module for initializing the free tree
|
||||
* \author Martin Stein
|
||||
* \author Josef Soentgen
|
||||
* \date 2023-03-09
|
||||
*/
|
||||
@@ -11,751 +12,222 @@
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
/* base includes */
|
||||
#include <base/log.h>
|
||||
|
||||
/* tresor includes */
|
||||
#include <tresor/block_allocator.h>
|
||||
#include <tresor/block_io.h>
|
||||
#include <tresor/sha256_4k_hash.h>
|
||||
#include <tresor/hash.h>
|
||||
#include <tresor/ft_initializer.h>
|
||||
|
||||
using namespace Tresor;
|
||||
|
||||
static constexpr bool DEBUG = false;
|
||||
|
||||
|
||||
Ft_initializer_request::Ft_initializer_request(Module_id src_module_id,
|
||||
Module_request_id src_request_id)
|
||||
Ft_initializer_request::Ft_initializer_request(Module_id src_mod, Module_channel_id src_chan,
|
||||
Tree_root &ft, Pba_allocator &pba_alloc, bool &success)
|
||||
:
|
||||
Module_request { src_module_id, src_request_id, FT_INITIALIZER }
|
||||
Module_request { src_mod, src_chan, FT_INITIALIZER }, _ft { ft }, _pba_alloc { pba_alloc }, _success { success }
|
||||
{ }
|
||||
|
||||
|
||||
void Ft_initializer_request::create(void *buf_ptr,
|
||||
size_t buf_size,
|
||||
uint64_t src_module_id,
|
||||
uint64_t src_request_id,
|
||||
size_t req_type,
|
||||
uint64_t max_level_idx,
|
||||
uint64_t max_child_idx,
|
||||
uint64_t nr_of_leaves)
|
||||
bool Ft_initializer_channel::_execute_t2_node(Tree_node_index node_idx, bool &progress)
|
||||
{
|
||||
Ft_initializer_request req { src_module_id, src_request_id };
|
||||
Node_state &node_state { _t2_node_states[node_idx] };
|
||||
Type_2_node &node { _t2_blk.nodes[node_idx] };
|
||||
switch (node_state) {
|
||||
case DONE: return false;
|
||||
case INIT_BLOCK:
|
||||
|
||||
req._type = (Type)req_type;
|
||||
req._max_level_idx = max_level_idx;
|
||||
req._max_child_idx = max_child_idx;
|
||||
req._nr_of_leaves = nr_of_leaves;
|
||||
|
||||
if (sizeof(req) > buf_size) {
|
||||
class Bad_size_0 { };
|
||||
throw Bad_size_0 { };
|
||||
}
|
||||
memcpy(buf_ptr, &req, sizeof(req));
|
||||
}
|
||||
|
||||
|
||||
char const *Ft_initializer_request::type_to_string(Type type)
|
||||
{
|
||||
switch (type) {
|
||||
case INVALID: return "invalid";
|
||||
case INIT: return "init";
|
||||
}
|
||||
return "?";
|
||||
}
|
||||
|
||||
|
||||
void Ft_initializer::_execute_leaf_child(Channel &channel,
|
||||
bool &progress,
|
||||
uint64_t &nr_of_leaves,
|
||||
Type_2_node &child,
|
||||
Ft_initializer_channel::Child_state &child_state,
|
||||
uint64_t child_index)
|
||||
{
|
||||
using CS = Ft_initializer_channel::Child_state;
|
||||
|
||||
switch (child_state) {
|
||||
case CS::INIT_BLOCK:
|
||||
child_state = CS::INIT_NODE;
|
||||
node_state = INIT_NODE;
|
||||
progress = true;
|
||||
return;
|
||||
break;
|
||||
|
||||
case CS::INIT_NODE:
|
||||
if (nr_of_leaves == 0) {
|
||||
case INIT_NODE:
|
||||
|
||||
if (DEBUG)
|
||||
log("[ft_init] node: ", 1, " ", child_index,
|
||||
" assign pba 0, leaf unused");
|
||||
|
||||
Ft_initializer_channel::reset_node(child);
|
||||
child_state = CS::DONE;
|
||||
progress = true;
|
||||
} else {
|
||||
|
||||
switch (channel._state) {
|
||||
case Channel::IN_PROGRESS:
|
||||
channel._state = Channel::BLOCK_ALLOC_PENDING;
|
||||
progress = true;
|
||||
break;
|
||||
|
||||
case Channel::BLOCK_ALLOC_PENDING:
|
||||
break;
|
||||
|
||||
case Channel::BLOCK_ALLOC_IN_PROGRESS:
|
||||
break;
|
||||
|
||||
case Channel::BLOCK_ALLOC_COMPLETE:
|
||||
/* bail early in case the allocator failed */
|
||||
if (!channel._generated_req_success) {
|
||||
_mark_req_failed(channel, progress,
|
||||
"allocate block for FT initialization");
|
||||
break;
|
||||
}
|
||||
channel._state = Channel::IN_PROGRESS;
|
||||
|
||||
Ft_initializer_channel::reset_node(child);
|
||||
|
||||
child.pba = channel._blk_nr;
|
||||
child_state = CS::DONE;
|
||||
--nr_of_leaves;
|
||||
progress = true;
|
||||
|
||||
if (DEBUG)
|
||||
log("[ft_init] node: ", 1, " ", child_index,
|
||||
" assign pba: ", channel._blk_nr, " leaves left: ",
|
||||
nr_of_leaves);
|
||||
break;
|
||||
|
||||
default:
|
||||
if (_num_remaining_leaves) {
|
||||
node = { };
|
||||
if (!_req_ptr->_pba_alloc.alloc(node.pba)) {
|
||||
_mark_req_failed(progress, "allocate pba");
|
||||
break;
|
||||
}
|
||||
node_state = DONE;
|
||||
_num_remaining_leaves--;
|
||||
progress = true;
|
||||
if (VERBOSE_FT_INIT)
|
||||
log("[ft_init] node: ", 1, " ", node_idx, " assign pba: ", node.pba, " leaves left: ", _num_remaining_leaves);
|
||||
} else {
|
||||
node = { };
|
||||
node_state = DONE;
|
||||
progress = true;
|
||||
if (VERBOSE_FT_INIT)
|
||||
log("[ft_init] node: ", 1, " ", node_idx, " assign pba 0, leaf unused");
|
||||
}
|
||||
default:
|
||||
break;
|
||||
|
||||
case WRITE_BLK: ASSERT_NEVER_REACHED;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
void Ft_initializer::_execute_inner_t2_child(Channel &channel,
|
||||
bool &progress,
|
||||
uint64_t nr_of_leaves,
|
||||
uint64_t &level_to_write,
|
||||
Type_1_node &child,
|
||||
Ft_initializer_channel::Type_2_level &child_level,
|
||||
Ft_initializer_channel::Child_state &child_state,
|
||||
uint64_t level_index,
|
||||
uint64_t child_index)
|
||||
|
||||
bool Ft_initializer_channel::_execute_t1_node(Tree_level_index lvl, Tree_node_index node_idx, bool &progress)
|
||||
{
|
||||
using CS = Ft_initializer_channel::Child_state;
|
||||
Type_1_node &node { _t1_blks.items[lvl].nodes[node_idx] };
|
||||
Node_state &node_state { _t1_node_states[lvl][node_idx] };
|
||||
switch (node_state) {
|
||||
case DONE: return false;
|
||||
case INIT_BLOCK:
|
||||
|
||||
switch (child_state) {
|
||||
case CS::INIT_BLOCK:
|
||||
|
||||
if (nr_of_leaves == 0) {
|
||||
|
||||
if (DEBUG)
|
||||
log("[ft_init] node: ", level_index, " ", child_index,
|
||||
" assign pba 0, inner node unused");
|
||||
|
||||
Ft_initializer_channel::reset_node(child);
|
||||
child_state = CS::DONE;
|
||||
if (_num_remaining_leaves) {
|
||||
_reset_level(lvl - 1, INIT_BLOCK);
|
||||
node_state = INIT_NODE;
|
||||
progress = true;
|
||||
return;
|
||||
if (VERBOSE_FT_INIT)
|
||||
log("[ft_init] node: ", lvl, " ", node_idx, " reset level: ", lvl - 1);
|
||||
} else {
|
||||
|
||||
if (DEBUG)
|
||||
log("[ft_init] node: ", level_index, " ", child_index,
|
||||
" reset level: ", level_index - 1);
|
||||
|
||||
Ft_initializer_channel::reset_level(child_level, CS::INIT_BLOCK);
|
||||
child_state = CS::INIT_NODE;
|
||||
node = { };
|
||||
node_state = DONE;
|
||||
progress = true;
|
||||
return;
|
||||
if (VERBOSE_FT_INIT)
|
||||
log("[ft_init] node: ", lvl, " ", node_idx, " assign pba 0, unused");
|
||||
}
|
||||
break;
|
||||
|
||||
case CS::INIT_NODE:
|
||||
|
||||
switch (channel._state) {
|
||||
case Channel::IN_PROGRESS:
|
||||
channel._state = Channel::BLOCK_ALLOC_PENDING;
|
||||
progress = true;
|
||||
break;
|
||||
|
||||
case Channel::BLOCK_ALLOC_PENDING:
|
||||
break;
|
||||
|
||||
case Channel::BLOCK_ALLOC_IN_PROGRESS:
|
||||
break;
|
||||
|
||||
case Channel::BLOCK_ALLOC_COMPLETE:
|
||||
{
|
||||
/* bail early in case the allocator failed */
|
||||
if (!channel._generated_req_success) {
|
||||
_mark_req_failed(channel, progress,
|
||||
"allocate block for FT initialization");
|
||||
break;
|
||||
}
|
||||
channel._state = Channel::IN_PROGRESS;
|
||||
|
||||
Ft_initializer_channel::reset_node(child);
|
||||
child.pba = channel._blk_nr;
|
||||
|
||||
Block blk { };
|
||||
child_level.children.encode_to_blk(blk);
|
||||
calc_sha256_4k_hash(blk, child.hash);
|
||||
|
||||
child_state = CS::WRITE_BLOCK;
|
||||
progress = true;
|
||||
|
||||
if (DEBUG)
|
||||
log("[ft_init] node: ", level_index, " ", child_index,
|
||||
" assign pba: ", channel._blk_nr);
|
||||
case INIT_NODE:
|
||||
{
|
||||
node = { };
|
||||
if (!_req_ptr->_pba_alloc.alloc(node.pba)) {
|
||||
_mark_req_failed(progress, "allocate pba");
|
||||
break;
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
|
||||
case CS::WRITE_BLOCK:
|
||||
|
||||
switch (channel._state) {
|
||||
case Channel::IN_PROGRESS:
|
||||
{
|
||||
channel._state = Channel::BLOCK_IO_PENDING;
|
||||
channel._child_pba = child.pba;
|
||||
level_to_write = level_index - 1;
|
||||
progress = true;
|
||||
break;
|
||||
}
|
||||
|
||||
case Channel::BLOCK_IO_PENDING:
|
||||
break;
|
||||
|
||||
case Channel::BLOCK_IO_IN_PROGRESS:
|
||||
break;
|
||||
|
||||
case Channel::BLOCK_IO_COMPLETE:
|
||||
/* bail early in case the allocator failed */
|
||||
if (!channel._generated_req_success) {
|
||||
_mark_req_failed(channel, progress,
|
||||
"write block for FT initialization");
|
||||
break;
|
||||
}
|
||||
channel._state = Channel::IN_PROGRESS;
|
||||
|
||||
child_state = CS::DONE;
|
||||
progress = true;
|
||||
|
||||
if (DEBUG)
|
||||
log("[ft_init] node: ", level_index, " ", child_index,
|
||||
" write pba: ", channel._child_pba, " level: ",
|
||||
level_index - 1, " (child: ", child, ")");
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
break;
|
||||
|
||||
default:
|
||||
if (lvl == 2)
|
||||
_t2_blk.encode_to_blk(_blk);
|
||||
else
|
||||
_t1_blks.items[lvl - 1].encode_to_blk(_blk);
|
||||
calc_hash(_blk, node.hash);
|
||||
generate_req<Block_io::Write>(EXECUTE_NODES, progress, node.pba, _blk, _generated_req_success);
|
||||
_state = REQ_GENERATED;
|
||||
node_state = WRITE_BLK;
|
||||
progress = true;
|
||||
if (VERBOSE_FT_INIT)
|
||||
log("[ft_init] node: ", lvl, " ", node_idx, " assign pba: ", node.pba);
|
||||
break;
|
||||
}
|
||||
case WRITE_BLK:
|
||||
|
||||
node_state = DONE;
|
||||
progress = true;
|
||||
if (VERBOSE_FT_INIT)
|
||||
log("[ft_init] node: ", lvl, " ", node_idx, " write pba: ", node.pba, " level: ", lvl - 1, " (node: ", node, ")");
|
||||
break;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
void Ft_initializer::_execute_inner_t1_child(Channel &channel,
|
||||
bool &progress,
|
||||
uint64_t nr_of_leaves,
|
||||
uint64_t &level_to_write,
|
||||
Type_1_node &child,
|
||||
Ft_initializer_channel::Type_1_level &child_level,
|
||||
Ft_initializer_channel::Child_state &child_state,
|
||||
uint64_t level_index,
|
||||
uint64_t child_index)
|
||||
|
||||
void Ft_initializer_channel::_generated_req_completed(State_uint state_uint)
|
||||
{
|
||||
using CS = Ft_initializer_channel::Child_state;
|
||||
|
||||
switch (child_state) {
|
||||
case CS::INIT_BLOCK:
|
||||
|
||||
if (nr_of_leaves == 0) {
|
||||
|
||||
if (DEBUG)
|
||||
log("[ft_init] node: ", level_index, " ", child_index,
|
||||
" assign pba 0, inner node unused");
|
||||
|
||||
Ft_initializer_channel::reset_node(child);
|
||||
child_state = CS::DONE;
|
||||
progress = true;
|
||||
return;
|
||||
} else {
|
||||
|
||||
if (DEBUG)
|
||||
log("[ft_init] node: ", level_index, " ", child_index,
|
||||
" reset level: ", level_index - 1);
|
||||
|
||||
Ft_initializer_channel::reset_level(child_level, CS::INIT_BLOCK);
|
||||
child_state = CS::INIT_NODE;
|
||||
progress = true;
|
||||
return;
|
||||
}
|
||||
break;
|
||||
|
||||
case CS::INIT_NODE:
|
||||
|
||||
switch (channel._state) {
|
||||
case Channel::IN_PROGRESS:
|
||||
channel._state = Channel::BLOCK_ALLOC_PENDING;
|
||||
progress = true;
|
||||
break;
|
||||
|
||||
case Channel::BLOCK_ALLOC_PENDING:
|
||||
break;
|
||||
|
||||
case Channel::BLOCK_ALLOC_IN_PROGRESS:
|
||||
break;
|
||||
|
||||
case Channel::BLOCK_ALLOC_COMPLETE:
|
||||
{
|
||||
/* bail early in case the allocator failed */
|
||||
if (!channel._generated_req_success) {
|
||||
_mark_req_failed(channel, progress,
|
||||
"allocate block for FT initialization");
|
||||
break;
|
||||
}
|
||||
channel._state = Channel::IN_PROGRESS;
|
||||
|
||||
Ft_initializer_channel::reset_node(child);
|
||||
child.pba = channel._blk_nr;
|
||||
|
||||
Block blk { };
|
||||
child_level.children.encode_to_blk(blk);
|
||||
calc_sha256_4k_hash(blk, child.hash);
|
||||
|
||||
child_state = CS::WRITE_BLOCK;
|
||||
progress = true;
|
||||
|
||||
if (DEBUG)
|
||||
log("[ft_init] node: ", level_index, " ", child_index,
|
||||
" assign pba: ", channel._blk_nr);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
|
||||
case CS::WRITE_BLOCK:
|
||||
|
||||
switch (channel._state) {
|
||||
case Channel::IN_PROGRESS:
|
||||
{
|
||||
channel._state = Channel::BLOCK_IO_PENDING;
|
||||
channel._child_pba = child.pba;
|
||||
level_to_write = level_index - 1;
|
||||
progress = true;
|
||||
break;
|
||||
}
|
||||
|
||||
case Channel::BLOCK_IO_PENDING:
|
||||
break;
|
||||
|
||||
case Channel::BLOCK_IO_IN_PROGRESS:
|
||||
break;
|
||||
|
||||
case Channel::BLOCK_IO_COMPLETE:
|
||||
/* bail early in case the allocator failed */
|
||||
if (!channel._generated_req_success) {
|
||||
_mark_req_failed(channel, progress,
|
||||
"write block for FT initialization");
|
||||
break;
|
||||
}
|
||||
channel._state = Channel::IN_PROGRESS;
|
||||
|
||||
child_state = CS::DONE;
|
||||
progress = true;
|
||||
|
||||
if (DEBUG)
|
||||
log("[ft_init] node: ", level_index, " ", child_index,
|
||||
" write pba: ", channel._child_pba, " level: ",
|
||||
level_index - 1, " (child: ", child, ")");
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Ft_initializer::_execute(Channel &channel,
|
||||
bool &progress)
|
||||
{
|
||||
Request &req { channel._request };
|
||||
|
||||
/*
|
||||
* First handle all leaf child nodes; this starts after
* the inner T2 nodes below have been triggered.
|
||||
*/
|
||||
for (uint64_t child_idx = 0; child_idx <= req._max_child_idx; child_idx++) {
|
||||
|
||||
Ft_initializer_channel::Child_state &state =
|
||||
channel._t2_level.children_state[child_idx];
|
||||
|
||||
if (state != Ft_initializer_channel::Child_state::DONE) {
|
||||
|
||||
Type_2_node &child =
|
||||
channel._t2_level.children.nodes[child_idx];
|
||||
|
||||
_execute_leaf_child(channel, progress, req._nr_of_leaves,
|
||||
child, state, child_idx);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Second handle all inner child nodes; this starts after
* the root node below has been triggered.
|
||||
*/
|
||||
for (uint64_t level_idx = 1; level_idx <= req._max_level_idx; level_idx++) {
|
||||
|
||||
for (uint64_t child_idx = 0; child_idx <= req._max_child_idx; child_idx++) {
|
||||
|
||||
Ft_initializer_channel::Child_state &state =
|
||||
channel._t1_levels[level_idx].children_state[child_idx];
|
||||
|
||||
if (state != Ft_initializer_channel::Child_state::DONE) {
|
||||
|
||||
Type_1_node &child =
|
||||
channel._t1_levels[level_idx].children.nodes[child_idx];
|
||||
|
||||
if (level_idx == 2) {
|
||||
Ft_initializer_channel::Type_2_level &t2_level =
|
||||
channel._t2_level;
|
||||
|
||||
_execute_inner_t2_child(channel, progress,
|
||||
req._nr_of_leaves,
|
||||
channel._level_to_write,
|
||||
child, t2_level, state,
|
||||
level_idx, child_idx);
|
||||
} else {
|
||||
|
||||
Ft_initializer_channel::Type_1_level &t1_level =
|
||||
channel._t1_levels[level_idx - 1];
|
||||
|
||||
_execute_inner_t1_child(channel, progress,
|
||||
req._nr_of_leaves,
|
||||
channel._level_to_write,
|
||||
child, t1_level, state,
|
||||
level_idx, child_idx);
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Checking the root node will trigger the initialization process as
|
||||
* well as finish it.
|
||||
*/
|
||||
if (channel._root_node.state != Ft_initializer_channel::Child_state::DONE) {
|
||||
|
||||
Ft_initializer_channel::Type_1_level &t1_level =
|
||||
channel._t1_levels[req._max_level_idx];
|
||||
|
||||
_execute_inner_t1_child(channel, progress,
|
||||
req._nr_of_leaves,
|
||||
channel._level_to_write,
|
||||
channel._root_node.node, t1_level, channel._root_node.state,
|
||||
req._max_level_idx + 1, 0);
|
||||
if (!_generated_req_success) {
|
||||
error("ft initializer request (", *_req_ptr, ") failed because generated request failed");
|
||||
_req_ptr->_success = false;
|
||||
_state = REQ_COMPLETE;
|
||||
_req_ptr = nullptr;
|
||||
return;
|
||||
}
|
||||
_state = (State)state_uint;
|
||||
}
|
||||
|
||||
/*
|
||||
* We will end up here when the root state is 'DONE'.
|
||||
*/
|
||||
if (req._nr_of_leaves == 0)
|
||||
_mark_req_successful(channel, progress);
|
||||
|
||||
void Ft_initializer_channel::_mark_req_failed(bool &progress, char const *str)
|
||||
{
|
||||
error("ft initializer request (", *_req_ptr, ") failed because: ", str);
|
||||
_req_ptr->_success = false;
|
||||
_state = REQ_COMPLETE;
|
||||
_req_ptr = nullptr;
|
||||
progress = true;
|
||||
}
|
||||
|
||||
|
||||
void Ft_initializer_channel::_mark_req_successful(bool &progress)
|
||||
{
|
||||
_req_ptr->_ft.t1_node(_t1_blks.items[_req_ptr->_ft.max_lvl + 1].nodes[0]);
|
||||
_req_ptr->_success = true;
|
||||
_state = REQ_COMPLETE;
|
||||
_req_ptr = nullptr;
|
||||
progress = true;
|
||||
}
|
||||
|
||||
|
||||
void Ft_initializer_channel::_reset_level(Tree_level_index lvl, Node_state node_state)
|
||||
{
|
||||
if (lvl == 1)
|
||||
for (Tree_node_index idx = 0; idx < NUM_NODES_PER_BLK; idx++) {
|
||||
_t2_blk.nodes[idx] = { };
|
||||
_t2_node_states[idx] = node_state;
|
||||
}
|
||||
else
|
||||
_mark_req_failed(channel, progress, "initialize FT");
|
||||
for (Tree_node_index idx = 0; idx < NUM_NODES_PER_BLK; idx++) {
|
||||
_t1_blks.items[lvl].nodes[idx] = { };
|
||||
_t1_node_states[lvl][idx] = node_state;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Ft_initializer::_execute_init(Channel &channel,
|
||||
bool &progress)
|
||||
void Ft_initializer_channel::execute(bool &progress)
|
||||
{
|
||||
switch (channel._state) {
|
||||
case Channel::SUBMITTED:
|
||||
|
||||
/* clean residual state */
|
||||
for (unsigned int i = 0; i < TREE_MAX_LEVEL; i++) {
|
||||
Ft_initializer_channel::reset_level(channel._t1_levels[i],
|
||||
Ft_initializer_channel::Child_state::DONE);
|
||||
}
|
||||
channel._level_to_write = 0;
|
||||
|
||||
channel._state = Channel::PENDING;
|
||||
channel._root_node.state = Ft_initializer_channel::Child_state::INIT_BLOCK;
|
||||
progress = true;
|
||||
|
||||
if (!_req_ptr)
|
||||
return;
|
||||
|
||||
case Channel::PENDING:
|
||||
Request &req { *_req_ptr };
|
||||
switch (_state) {
|
||||
case REQ_SUBMITTED:
|
||||
|
||||
channel._state = Channel::IN_PROGRESS;
|
||||
_num_remaining_leaves = req._ft.num_leaves;
|
||||
for (Tree_level_index lvl = 0; lvl < TREE_MAX_LEVEL; lvl++)
|
||||
_reset_level(lvl, DONE);
|
||||
|
||||
_t1_node_states[req._ft.max_lvl + 1][0] = INIT_BLOCK;
|
||||
_state = EXECUTE_NODES;
|
||||
progress = true;
|
||||
return;
|
||||
|
||||
case Channel::IN_PROGRESS:
|
||||
case EXECUTE_NODES:
|
||||
|
||||
_execute(channel, progress);
|
||||
for (Tree_node_index node_idx = 0; node_idx < req._ft.degree; node_idx++)
|
||||
if (_execute_t2_node(node_idx, progress))
|
||||
return;
|
||||
|
||||
for (Tree_level_index lvl = 1; lvl <= req._ft.max_lvl + 1; lvl++)
|
||||
for (Tree_node_index node_idx = 0; node_idx < req._ft.degree; node_idx++)
|
||||
if (_execute_t1_node(lvl, node_idx, progress))
|
||||
return;
|
||||
|
||||
if (_num_remaining_leaves)
|
||||
_mark_req_failed(progress, "leaves remaining");
|
||||
else
|
||||
_mark_req_successful(progress);
|
||||
return;
|
||||
|
||||
case Channel::BLOCK_ALLOC_COMPLETE:
|
||||
|
||||
_execute(channel, progress);
|
||||
return;
|
||||
|
||||
case Channel::BLOCK_IO_COMPLETE:
|
||||
|
||||
_execute(channel, progress);
|
||||
return;
|
||||
|
||||
default:
|
||||
/*
|
||||
* Omit other states related to ALLOC and IO as those
|
||||
* are handled via Module API.
|
||||
*/
|
||||
return;
|
||||
default: return;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Ft_initializer::_mark_req_failed(Channel &channel,
|
||||
bool &progress,
|
||||
char const *str)
|
||||
void Ft_initializer_channel::_request_submitted(Module_request &mod_req)
|
||||
{
|
||||
error("request failed: failed to ", str);
|
||||
channel._request._success = false;
|
||||
channel._state = Channel::COMPLETE;
|
||||
progress = true;
|
||||
}
|
||||
|
||||
|
||||
void Ft_initializer::_mark_req_successful(Channel &channel,
|
||||
bool &progress)
|
||||
{
|
||||
Request &req { channel._request };
|
||||
|
||||
memcpy(req._root_node, &channel._root_node.node, sizeof (req._root_node));
|
||||
req._success = true;
|
||||
|
||||
channel._state = Channel::COMPLETE;
|
||||
progress = true;
|
||||
}
|
||||
|
||||
|
||||
bool Ft_initializer::_peek_completed_request(uint8_t *buf_ptr,
|
||||
size_t buf_size)
|
||||
{
|
||||
for (Channel &channel : _channels) {
|
||||
if (channel._state == Channel::COMPLETE) {
|
||||
if (sizeof(channel._request) > buf_size) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
memcpy(buf_ptr, &channel._request, sizeof(channel._request));
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
void Ft_initializer::_drop_completed_request(Module_request &req)
|
||||
{
|
||||
Module_request_id id { 0 };
|
||||
id = req.dst_request_id();
|
||||
if (id >= NR_OF_CHANNELS) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
if (_channels[id]._state != Channel::COMPLETE) {
|
||||
class Exception_2 { };
|
||||
throw Exception_2 { };
|
||||
}
|
||||
_channels[id]._state = Channel::INACTIVE;
|
||||
}
|
||||
|
||||
|
||||
bool Ft_initializer::_peek_generated_request(uint8_t *buf_ptr,
|
||||
size_t buf_size)
|
||||
{
|
||||
for (Module_request_id id { 0 }; id < NR_OF_CHANNELS; id++) {
|
||||
|
||||
Channel &channel { _channels[id] };
|
||||
|
||||
if (channel._state != Ft_initializer_channel::State::INACTIVE)
|
||||
|
||||
switch (channel._state) {
|
||||
case Ft_initializer_channel::State::BLOCK_ALLOC_PENDING:
|
||||
{
|
||||
Block_allocator_request::Type const block_allocator_req_type {
|
||||
Block_allocator_request::GET };
|
||||
|
||||
Block_allocator_request::create(
|
||||
buf_ptr, buf_size, FT_INITIALIZER, id,
|
||||
block_allocator_req_type);
|
||||
|
||||
return true;
|
||||
}
|
||||
case Ft_initializer_channel::State::BLOCK_IO_PENDING:
|
||||
{
|
||||
Block_io_request::Type const block_io_req_type {
|
||||
Block_io_request::WRITE };
|
||||
|
||||
if (channel._level_to_write == 1)
|
||||
channel._t2_level.children.encode_to_blk(channel._encoded_blk);
|
||||
else
|
||||
channel._t1_levels[channel._level_to_write].children.encode_to_blk(channel._encoded_blk);
|
||||
|
||||
construct_in_buf<Block_io_request>(
|
||||
buf_ptr, buf_size, FT_INITIALIZER, id, block_io_req_type, 0,
|
||||
0, 0, channel._child_pba, 0, 1,
|
||||
(void *)&channel._encoded_blk, nullptr);
|
||||
|
||||
if (DEBUG) {
|
||||
log("BLOCK_IO_PENDING write ", channel._child_pba);
|
||||
if (channel._level_to_write == 1)
|
||||
Ft_initializer_channel::dump(channel._t2_level.children);
|
||||
else
|
||||
Ft_initializer_channel::dump(channel._t1_levels[channel._level_to_write].children);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
void Ft_initializer::_drop_generated_request(Module_request &req)
|
||||
{
|
||||
Module_request_id const id { req.src_request_id() };
|
||||
if (id >= NR_OF_CHANNELS) {
|
||||
class Bad_id { };
|
||||
throw Bad_id { };
|
||||
}
|
||||
switch (_channels[id]._state) {
|
||||
case Ft_initializer_channel::State::BLOCK_ALLOC_PENDING:
|
||||
_channels[id]._state = Ft_initializer_channel::State::BLOCK_ALLOC_IN_PROGRESS;
|
||||
break;
|
||||
case Ft_initializer_channel::State::BLOCK_IO_PENDING:
|
||||
_channels[id]._state = Ft_initializer_channel::State::BLOCK_IO_IN_PROGRESS;
|
||||
break;
|
||||
default:
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Ft_initializer::generated_request_complete(Module_request &req)
|
||||
{
|
||||
Module_request_id const id { req.src_request_id() };
|
||||
if (id >= NR_OF_CHANNELS) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
switch (_channels[id]._state) {
|
||||
case Channel::BLOCK_ALLOC_IN_PROGRESS:
|
||||
{
|
||||
if (req.dst_module_id() != BLOCK_ALLOCATOR) {
|
||||
class Exception_3 { };
|
||||
throw Exception_3 { };
|
||||
}
|
||||
Block_allocator_request const *block_allocator_req = static_cast<Block_allocator_request const*>(&req);
|
||||
_channels[id]._state = Channel::BLOCK_ALLOC_COMPLETE;
|
||||
_channels[id]._blk_nr = block_allocator_req->blk_nr();
|
||||
_channels[id]._generated_req_success = block_allocator_req->success();
|
||||
break;
|
||||
}
|
||||
case Channel::BLOCK_IO_IN_PROGRESS:
|
||||
{
|
||||
if (req.dst_module_id() != BLOCK_IO) {
|
||||
class Exception_4 { };
|
||||
throw Exception_4 { };
|
||||
}
|
||||
Block_io_request const *block_io_req = static_cast<Block_io_request const*>(&req);
|
||||
_channels[id]._state = Channel::BLOCK_IO_COMPLETE;
|
||||
_channels[id]._generated_req_success = block_io_req->success();
|
||||
break;
|
||||
}
|
||||
default:
|
||||
class Exception_2 { };
|
||||
throw Exception_2 { };
|
||||
}
|
||||
_req_ptr = static_cast<Request *>(&mod_req);
|
||||
_state = REQ_SUBMITTED;
|
||||
}
|
||||
|
||||
|
||||
Ft_initializer::Ft_initializer()
|
||||
{ }
|
||||
|
||||
|
||||
bool Ft_initializer::ready_to_submit_request()
|
||||
{
|
||||
for (Channel &channel : _channels) {
|
||||
if (channel._state == Channel::INACTIVE)
|
||||
return true;
|
||||
Module_channel_id id { 0 };
|
||||
for (Constructible<Channel> &chan : _channels) {
|
||||
chan.construct(id++);
|
||||
add_channel(*chan);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
void Ft_initializer::submit_request(Module_request &req)
|
||||
{
|
||||
for (Module_request_id id { 0 }; id < NR_OF_CHANNELS; id++) {
|
||||
if (_channels[id]._state == Channel::INACTIVE) {
|
||||
req.dst_request_id(id);
|
||||
_channels[id]._request = *static_cast<Request *>(&req);
|
||||
_channels[id]._state = Channel::SUBMITTED;
|
||||
return;
|
||||
}
|
||||
}
|
||||
class Invalid_call { };
|
||||
throw Invalid_call { };
|
||||
}
|
||||
|
||||
|
||||
void Ft_initializer::execute(bool &progress)
|
||||
{
|
||||
for (Channel &channel : _channels) {
|
||||
|
||||
if (channel._state == Channel::INACTIVE)
|
||||
continue;
|
||||
|
||||
Request &req { channel._request };
|
||||
switch (req._type) {
|
||||
case Request::INIT:
|
||||
|
||||
_execute_init(channel, progress);
|
||||
|
||||
break;
|
||||
default:
|
||||
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
}
|
||||
for_each_channel<Channel>([&] (Channel &chan) {
|
||||
chan.execute(progress); });
|
||||
}
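/*
 * Usage sketch, not part of the patch: constructing an Ft_initializer_request
 * with the new interface (source module, source channel, Tree_root,
 * Pba_allocator, result flag) as declared by the constructor earlier in this
 * diff. The source-module id and the caller-side setup are hypothetical; in
 * the real code the requesting module hands the request to the module
 * framework.
 */
static void example_init_ft(Ft_initializer &ft_initializer,
                            Tree_root &ft, Pba_allocator &pba_alloc)
{
	bool success { false };
	Ft_initializer_request request {
		SB_INITIALIZER /* hypothetical source-module id */,
		0              /* source-channel id */,
		ft, pba_alloc, success };

	if (ft_initializer.ready_to_submit_request())
		ft_initializer.submit_request(request);
}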
|
||||
|
@@ -1,843 +0,0 @@
|
||||
/*
|
||||
* \brief Module for re-sizing the free tree
|
||||
* \author Martin Stein
|
||||
* \date 2023-05-09
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2023 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
/* base includes */
|
||||
#include <base/log.h>
|
||||
|
||||
/* tresor tester includes */
|
||||
#include <tresor/meta_tree.h>
|
||||
#include <tresor/block_io.h>
|
||||
#include <tresor/ft_resizing.h>
|
||||
#include <tresor/sha256_4k_hash.h>
|
||||
|
||||
using namespace Tresor;
|
||||
|
||||
|
||||
/*************************
|
||||
** Ft_resizing_request **
|
||||
*************************/
|
||||
|
||||
Ft_resizing_request::
|
||||
Ft_resizing_request(uint64_t src_module_id,
|
||||
uint64_t src_request_id,
|
||||
Type type,
|
||||
Generation curr_gen,
|
||||
Type_1_node ft_root,
|
||||
Tree_level_index ft_max_lvl,
|
||||
Number_of_leaves ft_nr_of_leaves,
|
||||
Tree_degree ft_degree,
|
||||
addr_t mt_root_pba_ptr,
|
||||
addr_t mt_root_gen_ptr,
|
||||
addr_t mt_root_hash_ptr,
|
||||
Tree_level_index mt_max_level,
|
||||
Tree_degree mt_degree,
|
||||
Number_of_leaves mt_leaves,
|
||||
Physical_block_address pba,
|
||||
Number_of_blocks nr_of_pbas)
|
||||
:
|
||||
Module_request { src_module_id, src_request_id, FT_RESIZING },
|
||||
_type { type },
|
||||
_curr_gen { curr_gen },
|
||||
_ft_root { ft_root },
|
||||
_ft_max_lvl { ft_max_lvl },
|
||||
_ft_nr_of_leaves { ft_nr_of_leaves },
|
||||
_ft_degree { ft_degree },
|
||||
_mt_root_pba_ptr { mt_root_pba_ptr },
|
||||
_mt_root_gen_ptr { mt_root_gen_ptr },
|
||||
_mt_root_hash_ptr { mt_root_hash_ptr },
|
||||
_mt_max_level { mt_max_level },
|
||||
_mt_degree { mt_degree },
|
||||
_mt_leaves { mt_leaves },
|
||||
_pba { pba },
|
||||
_nr_of_pbas { nr_of_pbas }
|
||||
{ }
|
||||
|
||||
char const *Ft_resizing_request::type_to_string(Type op)
|
||||
{
|
||||
switch (op) {
|
||||
case INVALID: return "invalid";
|
||||
case FT_EXTENSION_STEP: return "ft_ext_step";
|
||||
}
|
||||
return "?";
|
||||
}
|
||||
|
||||
|
||||
/*************************
|
||||
** Ft_resizing_request **
|
||||
*************************/
|
||||
|
||||
void Ft_resizing::_execute_ft_ext_step_read_inner_node_completed(Channel &channel,
|
||||
unsigned const job_idx,
|
||||
bool &progress)
|
||||
{
|
||||
Request &req { channel._request };
|
||||
if (not channel._generated_prim.succ) {
|
||||
class Primitive_not_successfull_ft_resizing { };
|
||||
throw Primitive_not_successfull_ft_resizing { };
|
||||
}
|
||||
|
||||
if (channel._lvl_idx > 1) {
|
||||
|
||||
if (channel._lvl_idx == req._ft_max_lvl) {
|
||||
|
||||
if (not check_sha256_4k_hash(channel._encoded_blk,
|
||||
req._ft_root.hash)) {
|
||||
class Program_error_ft_resizing_hash_mismatch { };
|
||||
throw Program_error_ft_resizing_hash_mismatch { };
|
||||
}
|
||||
|
||||
} else {
|
||||
|
||||
Tree_level_index const parent_lvl_idx = channel._lvl_idx + 1;
|
||||
Tree_node_index const child_idx = t1_child_idx_for_vba(channel._vba, parent_lvl_idx, req._ft_degree);
|
||||
Type_1_node const &child = channel._t1_blks.items[parent_lvl_idx].nodes[child_idx];
|
||||
|
||||
if (not check_sha256_4k_hash(channel._encoded_blk,
|
||||
child.hash)) {
|
||||
class Program_error_ft_resizing_hash_mismatch_2 { };
|
||||
throw Program_error_ft_resizing_hash_mismatch_2 { };
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
Tree_level_index const parent_lvl_idx = channel._lvl_idx;
|
||||
Tree_level_index const child_lvl_idx = channel._lvl_idx - 1;
|
||||
Tree_node_index const child_idx = t1_child_idx_for_vba(channel._vba, parent_lvl_idx, req._ft_degree);
|
||||
Type_1_node const &child = channel._t1_blks.items[parent_lvl_idx].nodes[child_idx];
|
||||
|
||||
if (child.valid()) {
|
||||
|
||||
channel._lvl_idx = child_lvl_idx;
|
||||
channel._old_pbas.pbas [child_lvl_idx] = child.pba;
|
||||
channel._old_generations.items[child_lvl_idx] = child.gen;
|
||||
|
||||
channel._generated_prim = {
|
||||
.op = Channel::Generated_prim::Type::READ,
|
||||
.succ = false,
|
||||
.tg = Channel::Tag_type::TAG_FT_RSZG_CACHE,
|
||||
.blk_nr = child.pba,
|
||||
.idx = job_idx
|
||||
};
|
||||
|
||||
channel._state = Channel::State::READ_INNER_NODE_PENDING;
|
||||
progress = true;
|
||||
|
||||
if (VERBOSE_FT_EXTENSION)
|
||||
log(" lvl ", parent_lvl_idx, " child ", child_idx,
|
||||
" (", child, "): load to lvl ", channel._lvl_idx);
|
||||
|
||||
} else {
|
||||
|
||||
_add_new_branch_to_ft_using_pba_contingent(parent_lvl_idx,
|
||||
child_idx,
|
||||
req._ft_degree,
|
||||
req._curr_gen,
|
||||
req._pba,
|
||||
req._nr_of_pbas,
|
||||
channel._t1_blks,
|
||||
channel._t2_blk,
|
||||
channel._new_pbas,
|
||||
channel._lvl_idx,
|
||||
req._nr_of_leaves);
|
||||
|
||||
channel._alloc_lvl_idx = parent_lvl_idx;
|
||||
|
||||
if (channel._old_generations.items[channel._alloc_lvl_idx] == req._curr_gen) {
|
||||
|
||||
channel._new_pbas.pbas[channel._alloc_lvl_idx] =
|
||||
channel._old_pbas.pbas[channel._alloc_lvl_idx];
|
||||
|
||||
channel._state = Channel::State::ALLOC_PBA_COMPLETED;
|
||||
progress = true;
|
||||
|
||||
} else {
|
||||
|
||||
channel._generated_prim = {
|
||||
.op = Channel::Generated_prim::Type::READ,
|
||||
.succ = false,
|
||||
.tg = Channel::Tag_type::TAG_FT_RSZG_MT_ALLOC,
|
||||
.blk_nr = 0,
|
||||
.idx = job_idx
|
||||
};
|
||||
|
||||
channel._state = Channel::State::ALLOC_PBA_PENDING;
|
||||
progress = true;
|
||||
|
||||
}
|
||||
}
|
||||
} else {
|
||||
|
||||
{
|
||||
Tree_level_index const parent_lvl_idx = channel._lvl_idx + 1;
|
||||
Tree_node_index const child_idx = t1_child_idx_for_vba(channel._vba, parent_lvl_idx, req._ft_degree);
|
||||
|
||||
if (not check_sha256_4k_hash(channel._encoded_blk,
|
||||
channel._t1_blks.items[parent_lvl_idx].nodes[child_idx].hash)) {
|
||||
class Program_error_ft_resizing_hash_mismatch_3 { };
|
||||
throw Program_error_ft_resizing_hash_mismatch_3 { };
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
Tree_level_index const parent_lvl_idx = channel._lvl_idx;
|
||||
Tree_node_index const child_idx = t2_child_idx_for_vba(channel._vba, req._ft_degree);
|
||||
Type_2_node const &child = channel._t2_blk.nodes[child_idx];
|
||||
|
||||
if (child.valid()) {
|
||||
class Program_error_ft_resizing_t2_valid { };
|
||||
throw Program_error_ft_resizing_t2_valid { };
|
||||
}
|
||||
|
||||
_add_new_branch_to_ft_using_pba_contingent(parent_lvl_idx,
|
||||
child_idx,
|
||||
req._ft_degree,
|
||||
req._curr_gen,
|
||||
req._pba,
|
||||
req._nr_of_pbas,
|
||||
channel._t1_blks,
|
||||
channel._t2_blk,
|
||||
channel._new_pbas,
|
||||
channel._lvl_idx,
|
||||
req._nr_of_leaves);
|
||||
|
||||
channel._alloc_lvl_idx = parent_lvl_idx;
|
||||
|
||||
if (VERBOSE_FT_EXTENSION)
|
||||
log(" alloc lvl ", channel._alloc_lvl_idx);
|
||||
|
||||
channel._generated_prim = {
|
||||
.op = Channel::Generated_prim::Type::READ,
|
||||
.succ = false,
|
||||
.tg = Channel::Tag_type::TAG_FT_RSZG_MT_ALLOC,
|
||||
.blk_nr = 0,
|
||||
.idx = job_idx
|
||||
};
|
||||
|
||||
channel._state = Channel::State::ALLOC_PBA_PENDING;
|
||||
progress = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Ft_resizing::_set_args_for_write_back_of_inner_lvl(Tree_level_index const max_lvl_idx,
|
||||
Tree_level_index const lvl_idx,
|
||||
Physical_block_address const pba,
|
||||
unsigned const prim_idx,
|
||||
Channel::State &job_state,
|
||||
bool &progress,
|
||||
Channel::Generated_prim &prim)
|
||||
{
|
||||
if (lvl_idx == 0) {
|
||||
class Program_error_ft_resizing_lvl_idx_zero { };
|
||||
throw Program_error_ft_resizing_lvl_idx_zero { };
|
||||
}
|
||||
|
||||
if (lvl_idx > max_lvl_idx) {
|
||||
class Program_error_ft_resizing_lvl_idx_large { };
|
||||
throw Program_error_ft_resizing_lvl_idx_large { };
|
||||
}
|
||||
|
||||
prim = {
|
||||
.op = Channel::Generated_prim::Type::WRITE,
|
||||
.succ = false,
|
||||
.tg = Channel::Tag_type::TAG_FT_RSZG_CACHE,
|
||||
.blk_nr = pba,
|
||||
.idx = prim_idx
|
||||
};
|
||||
|
||||
if (VERBOSE_FT_EXTENSION)
|
||||
log(" lvl ", lvl_idx, " write to pba ", pba);
|
||||
|
||||
if (lvl_idx < max_lvl_idx) {
|
||||
job_state = Channel::State::WRITE_INNER_NODE_PENDING;
|
||||
progress = true;
|
||||
} else {
|
||||
job_state = Channel::State::WRITE_ROOT_NODE_PENDING;
|
||||
progress = true;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Ft_resizing::_add_new_root_lvl_to_ft_using_pba_contingent(Type_1_node &ft_root,
|
||||
Tree_level_index &ft_max_lvl_idx,
|
||||
Number_of_leaves const ft_nr_of_leaves,
|
||||
Generation const curr_gen,
|
||||
Channel::Type_1_node_blocks &t1_blks,
|
||||
Tree_walk_pbas &new_pbas,
|
||||
Physical_block_address &first_pba,
|
||||
Number_of_blocks &nr_of_pbas)
|
||||
{
|
||||
if (ft_max_lvl_idx >= TREE_MAX_LEVEL) {
|
||||
class Program_error_ft_resizing_max_level { };
|
||||
throw Program_error_ft_resizing_max_level { };
|
||||
}
|
||||
|
||||
ft_max_lvl_idx += 1;
|
||||
|
||||
t1_blks.items[ft_max_lvl_idx] = { };
|
||||
t1_blks.items[ft_max_lvl_idx].nodes[0] = ft_root;
|
||||
|
||||
new_pbas.pbas[ft_max_lvl_idx] = alloc_pba_from_resizing_contingent(first_pba, nr_of_pbas);
|
||||
|
||||
ft_root = {
|
||||
.pba = new_pbas.pbas[ft_max_lvl_idx],
|
||||
.gen = curr_gen,
|
||||
.hash = { },
|
||||
};
|
||||
|
||||
if (VERBOSE_FT_EXTENSION) {
|
||||
log(" set ft root: ", ft_root, " leaves ", ft_nr_of_leaves,
|
||||
" max lvl ", ft_max_lvl_idx);
|
||||
|
||||
log(" set lvl ", ft_max_lvl_idx,
|
||||
" child 0: ", t1_blks.items[ft_max_lvl_idx].nodes[0]);
|
||||
}
|
||||
(void)ft_nr_of_leaves;
|
||||
}
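/*
 * Sketch, not part of the patch: presumable semantics of
 * alloc_pba_from_resizing_contingent as used above and below. The contingent
 * is read as a contiguous range [first_pba, first_pba + nr_of_pbas); each
 * call takes the front block and shrinks the range. The real helper may
 * differ in detail.
 */
static Physical_block_address
example_alloc_from_contingent(Physical_block_address &first_pba,
                              Number_of_blocks       &nr_of_pbas)
{
	Physical_block_address const pba { first_pba };
	first_pba  += 1;
	nr_of_pbas -= 1;
	return pba;
}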
|
||||
|
||||
|
||||
void Ft_resizing::_add_new_branch_to_ft_using_pba_contingent(Tree_level_index const mount_point_lvl_idx,
|
||||
Tree_node_index const mount_point_child_idx,
|
||||
Tree_degree const ft_degree,
|
||||
Generation const curr_gen,
|
||||
Physical_block_address &first_pba,
|
||||
Number_of_blocks &nr_of_pbas,
|
||||
Channel::Type_1_node_blocks &t1_blks,
|
||||
Type_2_node_block &t2_blk,
|
||||
Tree_walk_pbas &new_pbas,
|
||||
Tree_level_index &stopped_at_lvl_idx,
|
||||
Number_of_leaves &nr_of_leaves)
|
||||
{
|
||||
nr_of_leaves = 0;
|
||||
stopped_at_lvl_idx = mount_point_lvl_idx;
|
||||
|
||||
if (mount_point_lvl_idx > 1) {
|
||||
for (unsigned lvl_idx = 1; lvl_idx <= mount_point_lvl_idx - 1; lvl_idx++) {
|
||||
if (lvl_idx > 1)
|
||||
t1_blks.items[lvl_idx] = Type_1_node_block { };
|
||||
else
|
||||
t2_blk = Type_2_node_block { };
|
||||
|
||||
if (VERBOSE_FT_EXTENSION)
|
||||
log(" reset lvl ", lvl_idx);
|
||||
}
|
||||
}
|
||||
|
||||
if (nr_of_pbas > 0) {
|
||||
|
||||
for (unsigned lvl_idx = mount_point_lvl_idx; lvl_idx >= 1; lvl_idx --) {
|
||||
stopped_at_lvl_idx = lvl_idx;
|
||||
|
||||
if (lvl_idx > 1) {
|
||||
|
||||
if (nr_of_pbas == 0)
|
||||
break;
|
||||
|
||||
Tree_node_index const child_idx = (lvl_idx == mount_point_lvl_idx) ? mount_point_child_idx : 0;
|
||||
Tree_level_index const child_lvl_idx = lvl_idx - 1;
|
||||
|
||||
new_pbas.pbas[child_lvl_idx] = alloc_pba_from_resizing_contingent(first_pba, nr_of_pbas);
|
||||
|
||||
t1_blks.items[lvl_idx].nodes[child_idx] = {
|
||||
.pba = new_pbas.pbas[child_lvl_idx],
|
||||
.gen = curr_gen,
|
||||
.hash = { }
|
||||
};
|
||||
|
||||
if (VERBOSE_FT_EXTENSION)
|
||||
log(" set lvl d ", lvl_idx, " child ", child_idx,
|
||||
": ", t1_blks.items[lvl_idx].nodes[child_idx]);
|
||||
|
||||
} else {
|
||||
Tree_node_index const first_child_idx = (lvl_idx == mount_point_lvl_idx) ? mount_point_child_idx : 0;
|
||||
|
||||
for (Tree_node_index child_idx = first_child_idx; child_idx <= ft_degree - 1; child_idx++) {
|
||||
|
||||
if (nr_of_pbas == 0)
|
||||
break;
|
||||
|
||||
Physical_block_address child_pba = alloc_pba_from_resizing_contingent(first_pba, nr_of_pbas);
|
||||
|
||||
t2_blk.nodes[child_idx] = {
|
||||
.pba = child_pba,
|
||||
.last_vba = INVALID_VBA,
|
||||
.alloc_gen = INITIAL_GENERATION,
|
||||
.free_gen = INITIAL_GENERATION,
|
||||
.last_key_id = INVALID_KEY_ID,
|
||||
.reserved = false
|
||||
};
|
||||
|
||||
if (VERBOSE_FT_EXTENSION)
|
||||
log(" set lvl e ", lvl_idx, " child ", child_idx,
|
||||
": ", t2_blk.nodes[child_idx]);
|
||||
|
||||
nr_of_leaves = nr_of_leaves + 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Ft_resizing::_execute_ft_extension_step(Channel &chan,
|
||||
unsigned const chan_idx,
|
||||
bool &progress)
|
||||
{
|
||||
Request &req { chan._request };
|
||||
switch (chan._state) {
|
||||
case Channel::State::SUBMITTED:
|
||||
|
||||
req._nr_of_leaves = 0;
|
||||
chan._vba = req._ft_nr_of_leaves;
|
||||
|
||||
chan._old_pbas = { };
|
||||
chan._old_generations = { };
|
||||
chan._new_pbas = { };
|
||||
|
||||
chan._lvl_idx = req._ft_max_lvl;
|
||||
chan._old_pbas.pbas[chan._lvl_idx] = req._ft_root.pba;
|
||||
chan._old_generations.items[chan._lvl_idx] = req._ft_root.gen;
|
||||
|
||||
if (chan._vba <= tree_max_max_vba(req._ft_degree, req._ft_max_lvl)) {
|
||||
|
||||
chan._generated_prim = {
|
||||
.op = Channel::Generated_prim::Type::READ,
|
||||
.succ = false,
|
||||
.tg = Channel::Tag_type::TAG_FT_RSZG_CACHE,
|
||||
.blk_nr = req._ft_root.pba,
|
||||
.idx = chan_idx
|
||||
};
|
||||
|
||||
if (VERBOSE_FT_EXTENSION)
|
||||
log(" root (", req._ft_root,
|
||||
" leaves ", req._ft_nr_of_leaves,
|
||||
" max lvl ", req._ft_max_lvl,
|
||||
"): load to lvl ", chan._lvl_idx);
|
||||
|
||||
chan._state = Channel::State::READ_ROOT_NODE_PENDING;
|
||||
progress = true;
|
||||
|
||||
} else {
|
||||
|
||||
_add_new_root_lvl_to_ft_using_pba_contingent(req._ft_root,
|
||||
req._ft_max_lvl,
|
||||
req._ft_nr_of_leaves,
|
||||
req._curr_gen,
|
||||
chan._t1_blks,
|
||||
chan._new_pbas,
|
||||
req._pba,
|
||||
req._nr_of_pbas);
|
||||
|
||||
_add_new_branch_to_ft_using_pba_contingent(req._ft_max_lvl,
|
||||
1,
|
||||
req._ft_degree,
|
||||
req._curr_gen,
|
||||
req._pba,
|
||||
req._nr_of_pbas,
|
||||
chan._t1_blks,
|
||||
chan._t2_blk,
|
||||
chan._new_pbas,
|
||||
chan._lvl_idx,
|
||||
req._nr_of_leaves);
|
||||
|
||||
if (VERBOSE_FT_EXTENSION)
|
||||
log(" pbas allocated: curr gen ", req._curr_gen);
|
||||
|
||||
_set_args_for_write_back_of_inner_lvl(req._ft_max_lvl,
|
||||
chan._lvl_idx,
|
||||
chan._new_pbas.pbas[chan._lvl_idx],
|
||||
chan_idx,
|
||||
chan._state,
|
||||
progress,
|
||||
chan._generated_prim);
|
||||
|
||||
}
|
||||
|
||||
break;
|
||||
case Channel::State::READ_ROOT_NODE_COMPLETED:
|
||||
_execute_ft_ext_step_read_inner_node_completed(chan, chan_idx, progress);
|
||||
break;
|
||||
case Channel::State::READ_INNER_NODE_COMPLETED:
|
||||
_execute_ft_ext_step_read_inner_node_completed(chan, chan_idx, progress);
|
||||
break;
|
||||
case Channel::State::ALLOC_PBA_COMPLETED:
|
||||
if (chan._alloc_lvl_idx < req._ft_max_lvl) {
|
||||
|
||||
chan._alloc_lvl_idx = chan._alloc_lvl_idx + 1;
|
||||
|
||||
if (chan._old_generations.items[chan._alloc_lvl_idx] == req._curr_gen) {
|
||||
|
||||
chan._new_pbas.pbas[chan._alloc_lvl_idx] = chan._old_pbas.pbas[chan._alloc_lvl_idx];
|
||||
|
||||
chan._state = Channel::State::ALLOC_PBA_COMPLETED;
|
||||
progress = true;
|
||||
|
||||
} else {
|
||||
|
||||
chan._generated_prim = {
|
||||
.op = Channel::Generated_prim::Type::READ,
|
||||
.succ = false,
|
||||
.tg = Channel::Tag_type::TAG_FT_RSZG_MT_ALLOC,
|
||||
.blk_nr = 0,
|
||||
.idx = chan_idx
|
||||
};
|
||||
|
||||
chan._state = Channel::State::ALLOC_PBA_PENDING;
|
||||
progress = true;
|
||||
|
||||
}
|
||||
|
||||
} else {
|
||||
|
||||
if (VERBOSE_FT_EXTENSION)
|
||||
log(" pbas allocated: curr gen ", req._curr_gen);
|
||||
|
||||
_set_args_for_write_back_of_inner_lvl(req._ft_max_lvl,
|
||||
chan._lvl_idx,
|
||||
chan._new_pbas.pbas[chan._lvl_idx],
|
||||
chan_idx,
|
||||
chan._state,
|
||||
progress,
|
||||
chan._generated_prim);
|
||||
|
||||
}
|
||||
break;
|
||||
case Channel::State::WRITE_INNER_NODE_COMPLETED:
|
||||
|
||||
if (not chan._generated_prim.succ) {
|
||||
class Primitive_not_successfull_ft_resizing_write_inner { };
|
||||
throw Primitive_not_successfull_ft_resizing_write_inner { };
|
||||
}
|
||||
|
||||
if (chan._lvl_idx > 1) {
|
||||
|
||||
Tree_level_index const parent_lvl_idx = chan._lvl_idx + 1;
|
||||
Tree_level_index const child_lvl_idx = chan._lvl_idx;
|
||||
Tree_node_index const child_idx = t1_child_idx_for_vba(chan._vba, parent_lvl_idx, req._ft_degree);
|
||||
|
||||
Type_1_node &child {
|
||||
chan._t1_blks.items[parent_lvl_idx].nodes[child_idx] };
|
||||
|
||||
child = {
|
||||
.pba = chan._new_pbas.pbas[child_lvl_idx],
|
||||
.gen = req._curr_gen,
|
||||
.hash = { },
|
||||
};
|
||||
|
||||
calc_sha256_4k_hash(chan._encoded_blk, child.hash);
|
||||
|
||||
if (VERBOSE_FT_EXTENSION)
|
||||
log(" set lvl a ", parent_lvl_idx, " child ", child_idx,
|
||||
": ", child);
|
||||
|
||||
_set_args_for_write_back_of_inner_lvl(req._ft_max_lvl,
|
||||
parent_lvl_idx,
|
||||
chan._new_pbas.pbas[parent_lvl_idx],
|
||||
chan_idx,
|
||||
chan._state,
|
||||
progress,
|
||||
chan._generated_prim);
|
||||
|
||||
chan._lvl_idx += 1;
|
||||
|
||||
} else {
|
||||
|
||||
Tree_level_index const parent_lvl_idx = chan._lvl_idx + 1;
|
||||
Tree_level_index const child_lvl_idx = chan._lvl_idx;
|
||||
Tree_node_index const child_idx = t1_child_idx_for_vba(chan._vba, parent_lvl_idx, req._ft_degree);
|
||||
Type_1_node &child = chan._t1_blks.items[parent_lvl_idx].nodes[child_idx];
|
||||
child = {
|
||||
.pba = chan._new_pbas.pbas[child_lvl_idx],
|
||||
.gen = req._curr_gen,
|
||||
};
|
||||
|
||||
calc_sha256_4k_hash(chan._encoded_blk, child.hash);
|
||||
|
||||
if (VERBOSE_FT_EXTENSION)
|
||||
log(" set lvl b ", parent_lvl_idx, " child ", child_idx,
|
||||
": ", child);
|
||||
|
||||
_set_args_for_write_back_of_inner_lvl(req._ft_max_lvl,
|
||||
parent_lvl_idx,
|
||||
chan._new_pbas.pbas[parent_lvl_idx],
|
||||
chan_idx,
|
||||
chan._state,
|
||||
progress,
|
||||
chan._generated_prim);
|
||||
|
||||
chan._lvl_idx += 1; // = 2
|
||||
|
||||
}
|
||||
break;
|
||||
case Channel::State::WRITE_ROOT_NODE_COMPLETED: {
|
||||
|
||||
if (not chan._generated_prim.succ) {
|
||||
class Primitive_not_successfull_ft_resizing_write_root { };
|
||||
throw Primitive_not_successfull_ft_resizing_write_root { };
|
||||
}
|
||||
|
||||
Tree_level_index const child_lvl_idx = chan._lvl_idx;
|
||||
Physical_block_address const child_pba = chan._new_pbas.pbas[child_lvl_idx];
|
||||
|
||||
req._ft_root = {
|
||||
.pba = child_pba,
|
||||
.gen = req._curr_gen,
|
||||
};
|
||||
|
||||
calc_sha256_4k_hash(chan._encoded_blk, req._ft_root.hash);
|
||||
|
||||
req._ft_nr_of_leaves += req._nr_of_leaves;
|
||||
|
||||
req._success = true;
|
||||
|
||||
chan._state = Channel::State::COMPLETED;
|
||||
progress = true;
|
||||
|
||||
break;
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Ft_resizing::execute(bool &progress)
|
||||
{
|
||||
for (unsigned idx = 0; idx < NR_OF_CHANNELS; idx++) {
|
||||
|
||||
Channel &channel = _channels[idx];
|
||||
Request &request { channel._request };
|
||||
|
||||
switch (request._type) {
|
||||
case Request::INVALID:
|
||||
break;
|
||||
case Request::FT_EXTENSION_STEP:
|
||||
_execute_ft_extension_step(channel, idx, progress);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
bool Ft_resizing::_peek_completed_request(uint8_t *buf_ptr,
|
||||
size_t buf_size)
|
||||
{
|
||||
for (Channel &channel : _channels) {
|
||||
if (channel._request._type != Request::INVALID &&
|
||||
channel._state == Channel::COMPLETED) {
|
||||
|
||||
if (sizeof(channel._request) > buf_size) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
memcpy(buf_ptr, &channel._request, sizeof(channel._request));
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
void Ft_resizing::_drop_completed_request(Module_request &req)
|
||||
{
|
||||
Module_request_id id { 0 };
|
||||
id = req.dst_request_id();
|
||||
if (id >= NR_OF_CHANNELS) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
Channel &chan { _channels[id] };
|
||||
if (chan._request._type == Request::INVALID ||
|
||||
chan._state != Channel::COMPLETED) {
|
||||
|
||||
class Exception_2 { };
|
||||
throw Exception_2 { };
|
||||
}
|
||||
chan._request._type = Request::INVALID;
|
||||
}
|
||||
|
||||
|
||||
bool Ft_resizing::_peek_generated_request(uint8_t *buf_ptr,
|
||||
size_t buf_size)
|
||||
{
|
||||
for (uint32_t id { 0 }; id < NR_OF_CHANNELS; id++) {
|
||||
|
||||
Channel &chan { _channels[id] };
|
||||
Request &req { chan._request };
|
||||
if (req._type == Request::INVALID)
|
||||
continue;
|
||||
|
||||
switch (chan._state) {
|
||||
case Channel::WRITE_ROOT_NODE_PENDING:
|
||||
case Channel::WRITE_INNER_NODE_PENDING:
|
||||
|
||||
if (chan._lvl_idx > 1)
|
||||
chan._t1_blks.items[chan._lvl_idx].encode_to_blk(chan._encoded_blk);
|
||||
else
|
||||
chan._t2_blk.encode_to_blk(chan._encoded_blk);
|
||||
construct_in_buf<Block_io_request>(
|
||||
buf_ptr, buf_size, FT_RESIZING, id,
|
||||
Block_io_request::WRITE, 0, 0, 0,
|
||||
chan._generated_prim.blk_nr, 0, 1,
|
||||
(void *)&chan._encoded_blk, nullptr);
|
||||
|
||||
return true;
|
||||
|
||||
case Channel::READ_ROOT_NODE_PENDING:
|
||||
|
||||
construct_in_buf<Block_io_request>(
|
||||
buf_ptr, buf_size, FT_RESIZING, id,
|
||||
Block_io_request::READ, 0, 0, 0,
|
||||
chan._generated_prim.blk_nr, 0, 1,
|
||||
(void *)&chan._encoded_blk, nullptr);
|
||||
|
||||
return true;
|
||||
|
||||
case Channel::READ_INNER_NODE_PENDING:
|
||||
|
||||
construct_in_buf<Block_io_request>(
|
||||
buf_ptr, buf_size, FT_RESIZING, id,
|
||||
Block_io_request::READ, 0, 0, 0,
|
||||
chan._generated_prim.blk_nr, 0, 1, (void *)&chan._encoded_blk,
|
||||
nullptr);
|
||||
|
||||
return true;
|
||||
|
||||
case Channel::State::ALLOC_PBA_PENDING:
|
||||
|
||||
Meta_tree_request::create(
|
||||
buf_ptr, buf_size, FT_RESIZING, id, Meta_tree_request::UPDATE,
|
||||
(void *)req._mt_root_pba_ptr,
|
||||
(void *)req._mt_root_gen_ptr,
|
||||
(void *)req._mt_root_hash_ptr,
|
||||
                req._mt_max_level,
                req._mt_degree,
                req._mt_leaves,
                req._curr_gen,
                chan._old_pbas.pbas[chan._alloc_lvl_idx]);

            return true;

        case Channel::EXTEND_MT_BY_ONE_LEAF_PENDING:

            class Exception_10 { };
            throw Exception_10 { };

        default: break;
        }
    }
    return false;
}


void Ft_resizing::_drop_generated_request(Module_request &mod_req)
{
    Module_request_id const id { mod_req.src_request_id() };
    if (id >= NR_OF_CHANNELS) {
        class Exception_1 { };
        throw Exception_1 { };
    }
    Channel &chan { _channels[id] };
    switch (chan._state) {
    case Channel::READ_ROOT_NODE_PENDING: chan._state = Channel::READ_ROOT_NODE_IN_PROGRESS; break;
    case Channel::READ_INNER_NODE_PENDING: chan._state = Channel::READ_INNER_NODE_IN_PROGRESS; break;
    case Channel::WRITE_ROOT_NODE_PENDING: chan._state = Channel::WRITE_ROOT_NODE_IN_PROGRESS; break;
    case Channel::WRITE_INNER_NODE_PENDING: chan._state = Channel::WRITE_INNER_NODE_IN_PROGRESS; break;
    case Channel::ALLOC_PBA_PENDING: chan._state = Channel::ALLOC_PBA_IN_PROGRESS; break;
    default:
        class Exception_2 { };
        throw Exception_2 { };
    }
}


void Ft_resizing::generated_request_complete(Module_request &mod_req)
{
    Module_request_id const id { mod_req.src_request_id() };
    if (id >= NR_OF_CHANNELS) {
        class Exception_1 { };
        throw Exception_1 { };
    }
    Channel &chan { _channels[id] };
    switch (mod_req.dst_module_id()) {
    case BLOCK_IO:
    {
        Block_io_request &blk_io_req { *static_cast<Block_io_request *>(&mod_req) };
        chan._generated_prim.succ = blk_io_req.success();
        switch (chan._state) {
        case Channel::READ_ROOT_NODE_IN_PROGRESS:
            chan._t1_blks.items[chan._lvl_idx].decode_from_blk(chan._encoded_blk);
            chan._state = Channel::READ_ROOT_NODE_COMPLETED;
            break;
        case Channel::READ_INNER_NODE_IN_PROGRESS:
            if (chan._lvl_idx > 1)
                chan._t1_blks.items[chan._lvl_idx].decode_from_blk(chan._encoded_blk);
            else
                chan._t2_blk.decode_from_blk(chan._encoded_blk);
            chan._state = Channel::READ_INNER_NODE_COMPLETED;
            break;
        case Channel::WRITE_ROOT_NODE_IN_PROGRESS: chan._state = Channel::WRITE_ROOT_NODE_COMPLETED; break;
        case Channel::WRITE_INNER_NODE_IN_PROGRESS: chan._state = Channel::WRITE_INNER_NODE_COMPLETED; break;
        default:
            class Exception_4 { };
            throw Exception_4 { };
        }
        break;
    }
    case META_TREE:
    {
        Meta_tree_request &mt_req { *static_cast<Meta_tree_request *>(&mod_req) };
        chan._generated_prim.succ = mt_req.success();
        switch (chan._state) {
        case Channel::ALLOC_PBA_IN_PROGRESS:
            chan._new_pbas.pbas[chan._alloc_lvl_idx] = mt_req.new_pba();
            chan._state = Channel::ALLOC_PBA_COMPLETED;
            break;
        default:
            class Exception_7 { };
            throw Exception_7 { };
        }
        break;
    }
    default:
        class Exception_5 { };
        throw Exception_5 { };
    }
}


bool Ft_resizing::ready_to_submit_request()
{
    for (Channel &channel : _channels) {
        if (channel._request._type == Request::INVALID)
            return true;
    }
    return false;
}


void Ft_resizing::submit_request(Module_request &mod_req)
{
    for (Module_request_id id { 0 }; id < NR_OF_CHANNELS; id++) {
        Channel &chan { _channels[id] };
        if (chan._request._type == Request::INVALID) {
            mod_req.dst_request_id(id);
            chan._request = *static_cast<Request *>(&mod_req);
            chan._state = Channel::SUBMITTED;
            return;
        }
    }
    class Invalid_call { };
    throw Invalid_call { };
}
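The ready_to_submit_request/submit_request pair above implements a channel-slot protocol: a request is accepted only while one of the module's fixed channels is idle, is stamped with the index of that channel, and is parked there until completion. The following standalone sketch restates that scheme with simplified, hypothetical types (Req, Slotted_module); it is not the Tresor module API, only an illustration of the pattern.

#include <cstddef>
#include <stdexcept>

/* hypothetical, simplified stand-ins for the module-framework types */
struct Req { unsigned channel_id { 0 }; bool valid { false }; };

template <std::size_t N>
struct Slotted_module
{
    Req _slots[N] { };

    /* a request can be accepted as long as one slot is idle */
    bool ready_to_submit() const
    {
        for (Req const &slot : _slots)
            if (!slot.valid)
                return true;
        return false;
    }

    /* park the request in the first idle slot and tag it with the slot index */
    void submit(Req req)
    {
        for (std::size_t id = 0; id < N; id++)
            if (!_slots[id].valid) {
                req.channel_id = (unsigned)id;
                req.valid = true;
                _slots[id] = req;
                return;
            }
        throw std::runtime_error("submit called without ready check");
    }
};

int main()
{
    Slotted_module<4> mod { };
    if (mod.ready_to_submit())
        mod.submit(Req { });
    return 0;
}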
43
repos/gems/src/lib/tresor/hash.cc
Normal file
@ -0,0 +1,43 @@
/*
 * \brief  Calculate and check hashes of tresor data blocks
 * \author Martin Stein
 * \date   2023-02-13
 */

/*
 * Copyright (C) 2023 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU Affero General Public License version 3.
 */

/* tresor includes */
#include <tresor/hash.h>
#include <tresor/types.h>

/* libcrypto */
#include <openssl/sha.h>

bool Tresor::check_hash(Block const &blk, Hash const &expected_hash)
{
    Hash got_hash;
    calc_hash(blk, got_hash);
    return got_hash == expected_hash;
}


void Tresor::calc_hash(Block const &blk, Hash &hash)
{
    SHA256_CTX context { };
    ASSERT(SHA256_Init(&context));
    ASSERT(SHA256_Update(&context, &blk, BLOCK_SIZE));
    ASSERT(SHA256_Final((unsigned char *)(&hash), &context));
}


Tresor::Hash Tresor::hash(Block const &blk)
{
    Hash hash { };
    calc_hash(blk, hash);
    return hash;
}
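The three helpers above are thin wrappers around OpenSSL's SHA-256 one-shot interface. As a hedged, standalone illustration of how they combine (Block, Hash and BLOCK_SIZE here are local stand-ins, not the types from tresor/types.h), a caller typically records a block's hash once and re-checks it on the next read; build with -lcrypto.

#include <openssl/sha.h>
#include <cstring>
#include <stdexcept>

enum { BLOCK_SIZE = 4096 };
struct Block { unsigned char bytes[BLOCK_SIZE] { }; };
struct Hash  { unsigned char bytes[SHA256_DIGEST_LENGTH] { }; };

/* compute the SHA-256 digest of one block */
static void calc_hash(Block const &blk, Hash &hash)
{
    SHA256_CTX context { };
    if (!SHA256_Init(&context) ||
        !SHA256_Update(&context, &blk, BLOCK_SIZE) ||
        !SHA256_Final(hash.bytes, &context))
        throw std::runtime_error("SHA-256 failed");
}

/* re-hash the block and compare against the stored digest */
static bool check_hash(Block const &blk, Hash const &expected)
{
    Hash got { };
    calc_hash(blk, got);
    return !std::memcmp(got.bytes, expected.bytes, sizeof(got.bytes));
}

int main()
{
    Block blk { };                        /* all-zeroes block            */
    Hash  hash { };
    calc_hash(blk, hash);                 /* remember the block's hash   */
    return check_hash(blk, hash) ? 0 : 1; /* verify it on a later read   */
}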
@ -1,141 +0,0 @@
|
||||
/*
|
||||
* \brief Managing block allocation for the initialization of a Tresor device
|
||||
* \author Josef Soentgen
|
||||
* \date 2023-02-28
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2023 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _TRESOR__BLOCK_ALLOCATOR_H_
|
||||
#define _TRESOR__BLOCK_ALLOCATOR_H_
|
||||
|
||||
/* tresor includes */
|
||||
#include <tresor/types.h>
|
||||
#include <tresor/module.h>
|
||||
|
||||
namespace Tresor {
|
||||
|
||||
class Block_allocator;
|
||||
class Block_allocator_request;
|
||||
class Block_allocator_channel;
|
||||
}
|
||||
|
||||
|
||||
class Tresor::Block_allocator_request : public Module_request
|
||||
{
|
||||
public:
|
||||
|
||||
enum Type { INVALID = 0, GET = 1, };
|
||||
|
||||
private:
|
||||
|
||||
friend class Block_allocator;
|
||||
friend class Block_allocator_channel;
|
||||
|
||||
Type _type { INVALID };
|
||||
uint64_t _blk_nr { 0 };
|
||||
bool _success { false };
|
||||
|
||||
public:
|
||||
|
||||
Block_allocator_request() { }
|
||||
|
||||
Block_allocator_request(Module_id src_module_id,
|
||||
Module_request_id src_request_id);
|
||||
|
||||
static void create(void *buf_ptr,
|
||||
size_t buf_size,
|
||||
uint64_t src_module_id,
|
||||
uint64_t src_request_id,
|
||||
size_t req_type);
|
||||
|
||||
uint64_t blk_nr() const { return _blk_nr; }
|
||||
|
||||
Type type() const { return _type; }
|
||||
|
||||
bool success() const { return _success; }
|
||||
|
||||
static char const *type_to_string(Type type);
|
||||
|
||||
|
||||
/********************
|
||||
** Module_request **
|
||||
********************/
|
||||
|
||||
void print(Output &out) const override;
|
||||
};
|
||||
|
||||
|
||||
class Tresor::Block_allocator_channel
|
||||
{
|
||||
private:
|
||||
|
||||
friend class Block_allocator;
|
||||
|
||||
enum State { INACTIVE, SUBMITTED, PENDING, COMPLETE };
|
||||
|
||||
State _state { INACTIVE };
|
||||
Block_allocator_request _request { };
|
||||
};
|
||||
|
||||
|
||||
class Tresor::Block_allocator : public Module
|
||||
{
|
||||
private:
|
||||
|
||||
using Request = Block_allocator_request;
|
||||
using Channel = Block_allocator_channel;
|
||||
|
||||
uint64_t const _first_block;
|
||||
uint64_t _nr_of_blks;
|
||||
|
||||
enum { NR_OF_CHANNELS = 1 };
|
||||
|
||||
Channel _channels[NR_OF_CHANNELS] { };
|
||||
|
||||
void _execute_get(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
void _mark_req_failed(Channel &channel,
|
||||
bool &progress,
|
||||
char const *str);
|
||||
|
||||
void _mark_req_successful(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
|
||||
/************
|
||||
** Module **
|
||||
************/
|
||||
|
||||
bool _peek_completed_request(uint8_t *buf_ptr,
|
||||
size_t buf_size) override;
|
||||
|
||||
void _drop_completed_request(Module_request &req) override;
|
||||
|
||||
public:
|
||||
|
||||
Block_allocator(uint64_t first_block);
|
||||
|
||||
uint64_t first_block() const { return _first_block; }
|
||||
|
||||
uint64_t nr_of_blks() const { return _nr_of_blks; }
|
||||
|
||||
/************
|
||||
** Module **
|
||||
************/
|
||||
|
||||
bool ready_to_submit_request() override;
|
||||
|
||||
void submit_request(Module_request &req) override;
|
||||
|
||||
void execute(bool &) override;
|
||||
|
||||
};
|
||||
|
||||
#endif /* _TRESOR__BLOCK_ALLOCATOR_H_ */
|
@ -16,8 +16,7 @@
|
||||
|
||||
/* tresor includes */
|
||||
#include <tresor/types.h>
|
||||
#include <tresor/module.h>
|
||||
#include <tresor/vfs_utilities.h>
|
||||
#include <tresor/file.h>
|
||||
|
||||
namespace Tresor {
|
||||
|
||||
@ -28,82 +27,88 @@ namespace Tresor {
|
||||
|
||||
class Tresor::Block_io_request : public Module_request
|
||||
{
|
||||
friend class Block_io_channel;
|
||||
|
||||
public:
|
||||
|
||||
enum Type {
|
||||
INVALID = 0, READ = 1, WRITE = 2, SYNC = 3, READ_CLIENT_DATA = 4,
|
||||
WRITE_CLIENT_DATA = 5 };
|
||||
enum Type { READ, WRITE, SYNC, READ_CLIENT_DATA, WRITE_CLIENT_DATA };
|
||||
|
||||
private:
|
||||
|
||||
friend class Block_io;
|
||||
friend class Block_io_channel;
|
||||
Type const _type;
|
||||
Request_offset const _client_req_offset;
|
||||
Request_tag const _client_req_tag;
|
||||
Key_id const _key_id;
|
||||
Physical_block_address const _pba;
|
||||
Virtual_block_address const _vba;
|
||||
Block &_blk;
|
||||
Hash &_hash;
|
||||
bool &_success;
|
||||
|
||||
Type _type { INVALID };
|
||||
uint64_t _client_req_offset { 0 };
|
||||
uint64_t _client_req_tag { 0 };
|
||||
uint32_t _key_id { 0 };
|
||||
uint64_t _pba { 0 };
|
||||
uint64_t _vba { 0 };
|
||||
uint64_t _blk_count { 0 };
|
||||
addr_t _blk_ptr { 0 };
|
||||
addr_t _hash_ptr { 0 };
|
||||
bool _success { false };
|
||||
NONCOPYABLE(Block_io_request);
|
||||
|
||||
public:
|
||||
|
||||
Block_io_request() { }
|
||||
Block_io_request(Module_id, Module_channel_id, Type, Request_offset, Request_tag, Key_id,
|
||||
Physical_block_address, Virtual_block_address, Block &, Hash &, bool &);
|
||||
|
||||
Block_io_request(uint64_t src_module_id,
|
||||
uint64_t src_request_id,
|
||||
size_t req_type,
|
||||
uint64_t client_req_offset,
|
||||
uint64_t client_req_tag,
|
||||
uint32_t key_id,
|
||||
uint64_t pba,
|
||||
uint64_t vba,
|
||||
uint64_t blk_count,
|
||||
void *blk_ptr,
|
||||
void *hash_ptr);
|
||||
static char const *type_to_string(Type);
|
||||
|
||||
Type type() const { return _type; }
|
||||
|
||||
bool success() const { return _success; }
|
||||
|
||||
static char const *type_to_string(Type type);
|
||||
|
||||
char const *type_name() const { return type_to_string(_type); }
|
||||
|
||||
|
||||
/********************
|
||||
** Module_request **
|
||||
********************/
|
||||
|
||||
void print(Output &out) const override;
|
||||
void print(Output &out) const override { Genode::print(out, type_to_string(_type), " pba ", _pba); }
|
||||
};
|
||||
|
||||
class Tresor::Block_io_channel
|
||||
class Tresor::Block_io_channel : public Module_channel
|
||||
{
|
||||
private:
|
||||
|
||||
friend class Block_io;
|
||||
using Request = Block_io_request;
|
||||
|
||||
enum State {
|
||||
INACTIVE, SUBMITTED, PENDING, IN_PROGRESS, COMPLETE,
|
||||
ENCRYPT_CLIENT_DATA_PENDING,
|
||||
ENCRYPT_CLIENT_DATA_IN_PROGRESS,
|
||||
ENCRYPT_CLIENT_DATA_COMPLETE,
|
||||
DECRYPT_CLIENT_DATA_PENDING,
|
||||
DECRYPT_CLIENT_DATA_IN_PROGRESS,
|
||||
DECRYPT_CLIENT_DATA_COMPLETE
|
||||
};
|
||||
REQ_SUBMITTED, REQ_COMPLETE, CIPHERTEXT_BLK_OBTAINED, PLAINTEXT_BLK_SUPPLIED, REQ_GENERATED,
|
||||
READ_OK, WRITE_OK, SYNC_OK, FILE_ERR };
|
||||
|
||||
State _state { INACTIVE };
|
||||
Block_io_request _request { };
|
||||
Vfs::file_offset _nr_of_processed_bytes { 0 };
|
||||
size_t _nr_of_remaining_bytes { 0 };
|
||||
Block _blk_buf { };
|
||||
bool _generated_req_success { false };
|
||||
State _state { REQ_COMPLETE };
|
||||
Block _blk { };
|
||||
bool _generated_req_success { false };
|
||||
Block_io_request *_req_ptr { };
|
||||
Vfs::Env &_vfs_env;
|
||||
Tresor::Path const _path;
|
||||
Read_write_file<State> _file { _state, _vfs_env, _path };
|
||||
|
||||
NONCOPYABLE(Block_io_channel);
|
||||
|
||||
void _generated_req_completed(State_uint) override;
|
||||
|
||||
template <typename REQUEST, typename... ARGS>
|
||||
void _generate_req(State_uint state, bool &progress, ARGS &&... args)
|
||||
{
|
||||
_state = REQ_GENERATED;
|
||||
generate_req<REQUEST>(state, progress, args..., _generated_req_success);
|
||||
}
|
||||
|
||||
void _request_submitted(Module_request &) override;
|
||||
|
||||
bool _request_complete() override { return _state == REQ_COMPLETE; }
|
||||
|
||||
void _read(bool &);
|
||||
|
||||
void _write(bool &);
|
||||
|
||||
void _read_client_data(bool &);
|
||||
|
||||
void _write_client_data(bool &);
|
||||
|
||||
void _sync(bool &);
|
||||
|
||||
void _mark_req_failed(bool &, Error_string);
|
||||
|
||||
void _mark_req_successful(bool &);
|
||||
|
||||
public:
|
||||
|
||||
Block_io_channel(Module_channel_id, Vfs::Env &, Xml_node const &);
|
||||
|
||||
void execute(bool &);
|
||||
};
|
||||
|
||||
class Tresor::Block_io : public Module
|
||||
@ -112,67 +117,48 @@ class Tresor::Block_io : public Module
|
||||
|
||||
using Request = Block_io_request;
|
||||
using Channel = Block_io_channel;
|
||||
using Read_result = Vfs::File_io_service::Read_result;
|
||||
using Write_result = Vfs::File_io_service::Write_result;
|
||||
using file_size = Vfs::file_size;
|
||||
using file_offset = Vfs::file_offset;
|
||||
|
||||
enum { NR_OF_CHANNELS = 1 };
|
||||
Constructible<Channel> _channels[1] { };
|
||||
|
||||
String<32> const _path;
|
||||
Vfs::Env &_vfs_env;
|
||||
Vfs::Vfs_handle &_vfs_handle { vfs_open_rw(_vfs_env, _path) };
|
||||
Channel _channels[NR_OF_CHANNELS] { };
|
||||
|
||||
void _execute_read(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
void _execute_write(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
void _execute_read_client_data(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
void _execute_write_client_data(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
void _execute_sync(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
void _mark_req_failed(Channel &channel,
|
||||
bool &progress,
|
||||
char const *str);
|
||||
|
||||
void _mark_req_successful(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
|
||||
/************
|
||||
** Module **
|
||||
************/
|
||||
|
||||
bool ready_to_submit_request() override;
|
||||
|
||||
void submit_request(Module_request &req) override;
|
||||
|
||||
bool _peek_completed_request(uint8_t *buf_ptr,
|
||||
size_t buf_size) override;
|
||||
|
||||
void _drop_completed_request(Module_request &req) override;
|
||||
|
||||
void execute(bool &) override;
|
||||
|
||||
bool _peek_generated_request(uint8_t *buf_ptr,
|
||||
size_t buf_size) override;
|
||||
|
||||
void _drop_generated_request(Module_request &mod_req) override;
|
||||
|
||||
void generated_request_complete(Module_request &req) override;
|
||||
NONCOPYABLE(Block_io);
|
||||
|
||||
public:
|
||||
|
||||
Block_io(Vfs::Env &vfs_env,
|
||||
Xml_node const &xml_node);
|
||||
struct Read : Request
|
||||
{
|
||||
Read(Module_id m, Module_channel_id c, Physical_block_address a, Block &b, bool &s)
|
||||
: Request(m, c, Request::READ, 0, 0, 0, a, 0, b, *(Hash*)0, s) { }
|
||||
};
|
||||
|
||||
struct Write : Request
|
||||
{
|
||||
Write(Module_id m, Module_channel_id c, Physical_block_address a, Block const &b, bool &s)
|
||||
: Request(m, c, Request::WRITE, 0, 0, 0, a, 0, *const_cast<Block*>(&b), *(Hash*)0, s) { }
|
||||
};
|
||||
|
||||
struct Sync : Request
|
||||
{
|
||||
Sync(Module_id m, Module_channel_id c, bool &s)
|
||||
: Request(m, c, Request::SYNC, 0, 0, 0, 0, 0, *(Block*)0, *(Hash*)0, s) { }
|
||||
};
|
||||
|
||||
struct Write_client_data : Request
|
||||
{
|
||||
Write_client_data(Module_id m, Module_channel_id c, Physical_block_address p, Virtual_block_address v,
|
||||
Key_id k, Request_tag t, Request_offset o, Block const &b, Hash &h, bool &s)
|
||||
: Request(m, c, Request::WRITE_CLIENT_DATA, o, t, k, p, v, *const_cast<Block*>(&b), h, s) { }
|
||||
};
|
||||
|
||||
struct Read_client_data : Request
|
||||
{
|
||||
Read_client_data(Module_id m, Module_channel_id c, Physical_block_address p, Virtual_block_address v,
|
||||
Key_id k, Request_tag t, Request_offset o, Block &b, bool &s)
|
||||
: Request(m, c, Request::READ_CLIENT_DATA, o, t, k, p, v, b, *(Hash*)0, s) { }
|
||||
};
|
||||
|
||||
Block_io(Vfs::Env &, Xml_node const &);
|
||||
|
||||
void execute(bool &) override;
|
||||
};
|
||||
|
||||
#endif /* _TRESOR__BLOCK_IO_H_ */
|
||||
|
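The Read/Write/Sync/Read_client_data/Write_client_data wrappers above bind caller-owned result locations (the data block, the hash, a success flag) into the request by reference, so a module completes a request by writing straight into the caller's objects instead of copying results back. Below is a minimal standalone sketch of that idiom with hypothetical names (Read_request, complete), not the Tresor classes.

#include <cstdio>

struct Block { unsigned char bytes[8] { }; };

/* a request carries references to where its results must land */
struct Read_request
{
    unsigned const pba;      /* which block to read          */
    Block         &blk;      /* caller-owned result buffer   */
    bool          &success;  /* caller-owned completion flag */

    Read_request(unsigned pba, Block &blk, bool &success)
    : pba { pba }, blk { blk }, success { success } { }
};

/* a module "completes" the request by writing through the references */
static void complete(Read_request &req)
{
    req.blk.bytes[0] = 42;
    req.success = true;
}

int main()
{
    Block block { };
    bool  ok { false };
    Read_request req { 7, block, ok };   /* results land in block and ok */
    complete(req);
    std::printf("ok=%d first byte=%u\n", ok, (unsigned)block.bytes[0]);
    return 0;
}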
@ -16,88 +16,53 @@
|
||||
|
||||
/* tresor includes */
|
||||
#include <tresor/types.h>
|
||||
#include <tresor/module.h>
|
||||
|
||||
namespace Tresor {
|
||||
namespace Tresor { class Client_data_request; }
|
||||
|
||||
class Client_data_request;
|
||||
}
|
||||
namespace Vfs_tresor { class Client_data; }
|
||||
|
||||
namespace Vfs_tresor {
|
||||
|
||||
class Wrapper;
|
||||
}
|
||||
|
||||
namespace Tresor_tester {
|
||||
|
||||
class Main;
|
||||
}
|
||||
namespace Tresor_tester { class Client_data; }
|
||||
|
||||
class Tresor::Client_data_request : public Module_request
|
||||
{
|
||||
friend class ::Vfs_tresor::Client_data;
|
||||
friend class ::Tresor_tester::Client_data;
|
||||
|
||||
public:
|
||||
|
||||
enum Type { INVALID, OBTAIN_PLAINTEXT_BLK, SUPPLY_PLAINTEXT_BLK };
|
||||
enum Type { OBTAIN_PLAINTEXT_BLK, SUPPLY_PLAINTEXT_BLK };
|
||||
|
||||
private:
|
||||
|
||||
friend class ::Vfs_tresor::Wrapper;
|
||||
friend class ::Tresor_tester::Main;
|
||||
Type const _type;
|
||||
Request_offset const _req_off;
|
||||
Request_tag const _req_tag;
|
||||
Physical_block_address const _pba;
|
||||
Virtual_block_address const _vba;
|
||||
Block &_blk;
|
||||
bool &_success;
|
||||
|
||||
Type _type { INVALID };
|
||||
uint64_t _client_req_offset { 0 };
|
||||
uint64_t _client_req_tag { 0 };
|
||||
uint64_t _pba { 0 };
|
||||
uint64_t _vba { 0 };
|
||||
addr_t _plaintext_blk_ptr { 0 };
|
||||
bool _success { false };
|
||||
NONCOPYABLE(Client_data_request);
|
||||
|
||||
public:
|
||||
|
||||
Client_data_request() { }
|
||||
|
||||
Type type() const { return _type; }
|
||||
|
||||
|
||||
/*****************************************************
|
||||
** can be removed once the tresor translation is done **
|
||||
*****************************************************/
|
||||
|
||||
Client_data_request(Module_id src_module_id,
|
||||
Module_request_id src_request_id,
|
||||
Type type,
|
||||
uint64_t client_req_offset,
|
||||
uint64_t client_req_tag,
|
||||
uint64_t pba,
|
||||
uint64_t vba,
|
||||
addr_t plaintext_blk_ptr)
|
||||
Client_data_request(Module_id src_mod_id, Module_channel_id src_chan_id, Type type,
|
||||
Request_offset req_off, Request_tag req_tag, Physical_block_address pba,
|
||||
Virtual_block_address vba, Block &blk, bool &success)
|
||||
:
|
||||
Module_request { src_module_id, src_request_id, CLIENT_DATA },
|
||||
_type { type },
|
||||
_client_req_offset { client_req_offset },
|
||||
_client_req_tag { client_req_tag },
|
||||
_pba { pba },
|
||||
_vba { vba },
|
||||
_plaintext_blk_ptr { plaintext_blk_ptr }
|
||||
Module_request { src_mod_id, src_chan_id, CLIENT_DATA }, _type { type }, _req_off { req_off },
|
||||
_req_tag { req_tag }, _pba { pba }, _vba { vba }, _blk { blk }, _success { success }
|
||||
{ }
|
||||
|
||||
bool success() const { return _success; }
|
||||
|
||||
static char const *type_to_string(Type type)
|
||||
{
|
||||
switch (type) {
|
||||
case INVALID: return "invalid";
|
||||
case OBTAIN_PLAINTEXT_BLK: return "obtain_plaintext_blk";
|
||||
case SUPPLY_PLAINTEXT_BLK: return "supply_plaintext_blk";
|
||||
}
|
||||
return "?";
|
||||
ASSERT_NEVER_REACHED;
|
||||
}
|
||||
|
||||
|
||||
/********************
|
||||
** Module_request **
|
||||
********************/
|
||||
|
||||
void print(Output &out) const override { Genode::print(out, type_to_string(_type)); }
|
||||
};
|
||||
|
||||
|
@ -1,38 +0,0 @@
/*
 * \brief  Size protected wrapper for the manual placement of objects
 * \author Martin Stein
 * \date   2023-03-23
 */

/*
 * Copyright (C) 2023 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU Affero General Public License version 3.
 */

#ifndef _TRESOR__CONSTRUCT_IN_BUF_H_
#define _TRESOR__CONSTRUCT_IN_BUF_H_

/* base includes */
#include <util/construct_at.h>

namespace Tresor {

    using namespace Genode;

    template <typename T,
              typename... ARGS>
    static inline void construct_in_buf(void *buf_ptr,
                                        size_t buf_size,
                                        ARGS &&... args)
    {
        if (sizeof(T) > buf_size) {
            class Buffer_too_small { };
            throw Buffer_too_small { };
        }
        construct_at<T>(buf_ptr, args...);
    }
}

#endif /* _TRESOR__CONSTRUCT_IN_BUF_H_ */
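The removed construct_in_buf helper guarded placement construction against overrunning the destination buffer. The following standalone sketch shows the same idea with standard C++ placement new instead of Genode's construct_at; the Example type and buffer sizes are illustrative only.

#include <new>        /* placement new */
#include <cstddef>
#include <stdexcept>

template <typename T, typename... ARGS>
static void construct_in_buf(void *buf_ptr, std::size_t buf_size, ARGS &&... args)
{
    /* refuse to construct an object larger than the provided buffer */
    if (sizeof(T) > buf_size)
        throw std::length_error("buffer too small");

    new (buf_ptr) T(static_cast<ARGS &&>(args)...);
}

struct Example { int a; int b; Example(int a, int b) : a { a }, b { b } { } };

int main()
{
    alignas(Example) unsigned char buf[sizeof(Example)];
    construct_in_buf<Example>(buf, sizeof(buf), 1, 2);   /* fits, constructs in place */
    try {
        construct_in_buf<Example>(buf, 1, 3, 4);         /* too small, throws         */
    } catch (std::length_error const &) { }
    return 0;
}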
@ -16,8 +16,7 @@
|
||||
|
||||
/* tresor includes */
|
||||
#include <tresor/types.h>
|
||||
#include <tresor/module.h>
|
||||
#include <tresor/vfs_utilities.h>
|
||||
#include <tresor/file.h>
|
||||
|
||||
namespace Tresor {
|
||||
|
||||
@ -28,97 +27,107 @@ namespace Tresor {
|
||||
|
||||
class Tresor::Crypto_request : public Module_request
|
||||
{
|
||||
friend class Crypto_channel;
|
||||
|
||||
public:
|
||||
|
||||
enum Type {
|
||||
INVALID = 0, ADD_KEY = 1, REMOVE_KEY = 2, DECRYPT = 3, ENCRYPT = 4,
|
||||
DECRYPT_CLIENT_DATA = 5, ENCRYPT_CLIENT_DATA = 6 };
|
||||
enum Type { ADD_KEY, REMOVE_KEY, DECRYPT, ENCRYPT, DECRYPT_CLIENT_DATA, ENCRYPT_CLIENT_DATA };
|
||||
|
||||
private:
|
||||
|
||||
friend class Crypto;
|
||||
friend class Crypto_channel;
|
||||
Type const _type;
|
||||
Request_offset const _client_req_offset;
|
||||
Request_tag const _client_req_tag;
|
||||
Physical_block_address const _pba;
|
||||
Virtual_block_address const _vba;
|
||||
Key_id const _key_id;
|
||||
Key_value const &_key_plaintext;
|
||||
Block &_blk;
|
||||
bool &_success;
|
||||
|
||||
Type _type { INVALID };
|
||||
uint64_t _client_req_offset { 0 };
|
||||
uint64_t _client_req_tag { 0 };
|
||||
uint64_t _pba { 0 };
|
||||
uint64_t _vba { 0 };
|
||||
uint32_t _key_id { 0 };
|
||||
addr_t _key_plaintext_ptr { 0 };
|
||||
addr_t _plaintext_blk_ptr { 0 };
|
||||
addr_t _ciphertext_blk_ptr { 0 };
|
||||
bool _success { false };
|
||||
NONCOPYABLE(Crypto_request);
|
||||
|
||||
public:
|
||||
|
||||
Crypto_request() { }
|
||||
Crypto_request(Module_id, Module_channel_id, Type, Request_offset, Request_tag, Key_id,
|
||||
Key_value const &, Physical_block_address, Virtual_block_address, Block &, bool &);
|
||||
|
||||
Type type() const { return _type; }
|
||||
static const char *type_to_string(Type);
|
||||
|
||||
Crypto_request(uint64_t src_module_id,
|
||||
uint64_t src_request_id,
|
||||
size_t req_type,
|
||||
uint64_t client_req_offset,
|
||||
uint64_t client_req_tag,
|
||||
uint32_t key_id,
|
||||
void *key_plaintext_ptr,
|
||||
uint64_t pba,
|
||||
uint64_t vba,
|
||||
void *plaintext_blk_ptr,
|
||||
void *ciphertext_blk_ptr);
|
||||
|
||||
bool success() const { return _success; }
|
||||
|
||||
static const char *type_to_string(Type type);
|
||||
|
||||
|
||||
/********************
|
||||
** Module_request **
|
||||
********************/
|
||||
|
||||
void print(Output &out) const override
|
||||
{
|
||||
Genode::print(out, type_to_string(_type));
|
||||
switch (_type) {
|
||||
case ADD_KEY:
|
||||
case REMOVE_KEY:
|
||||
Genode::print(out, " ", _key_id);
|
||||
break;
|
||||
case DECRYPT:
|
||||
case ENCRYPT:
|
||||
case DECRYPT_CLIENT_DATA:
|
||||
case ENCRYPT_CLIENT_DATA:
|
||||
Genode::print(out, " pba ", _pba);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
void print(Output &) const override;
|
||||
};
|
||||
|
||||
class Tresor::Crypto_channel
|
||||
class Tresor::Crypto_channel : public Module_channel
|
||||
{
|
||||
private:
|
||||
|
||||
friend class Crypto;
|
||||
using Request = Crypto_request;
|
||||
|
||||
enum State {
|
||||
INACTIVE, SUBMITTED, COMPLETE, OBTAIN_PLAINTEXT_BLK_PENDING,
|
||||
OBTAIN_PLAINTEXT_BLK_IN_PROGRESS, OBTAIN_PLAINTEXT_BLK_COMPLETE,
|
||||
SUPPLY_PLAINTEXT_BLK_PENDING, SUPPLY_PLAINTEXT_BLK_IN_PROGRESS,
|
||||
SUPPLY_PLAINTEXT_BLK_COMPLETE, OP_WRITTEN_TO_VFS_HANDLE,
|
||||
QUEUE_READ_SUCCEEDED };
|
||||
REQ_SUBMITTED, REQ_COMPLETE, PLAINTEXT_BLK_OBTAINED, PLAINTEXT_BLK_SUPPLIED, REQ_GENERATED,
|
||||
READ_OK, WRITE_OK, FILE_ERR };
|
||||
|
||||
State _state { INACTIVE };
|
||||
Crypto_request _request { };
|
||||
bool _generated_req_success { false };
|
||||
Vfs::Vfs_handle *_vfs_handle { nullptr };
|
||||
char _blk_buf[BLOCK_SIZE] { 0 };
|
||||
struct Key_directory
|
||||
{
|
||||
Crypto_channel &chan;
|
||||
Key_id key_id;
|
||||
Read_write_file<State> encrypt_file { chan._state, chan._vfs_env, { chan._path, "/keys/", key_id, "/encrypt" } };
|
||||
Read_write_file<State> decrypt_file { chan._state, chan._vfs_env, { chan._path, "/keys/", key_id, "/decrypt" } };
|
||||
|
||||
NONCOPYABLE(Key_directory);
|
||||
|
||||
Key_directory(Crypto_channel &chan, Key_id key_id) : chan { chan }, key_id { key_id } { }
|
||||
};
|
||||
|
||||
Vfs::Env &_vfs_env;
|
||||
Tresor::Path const _path;
|
||||
char _add_key_buf[sizeof(Key_id) + KEY_SIZE] { };
|
||||
Write_only_file<State> _add_key_file { _state, _vfs_env, { _path, "/add_key" } };
|
||||
Write_only_file<State> _remove_key_file { _state, _vfs_env, { _path, "/remove_key" } };
|
||||
Constructible<Key_directory> _key_dirs[2] { };
|
||||
State _state { REQ_COMPLETE };
|
||||
bool _generated_req_success { false };
|
||||
Block _blk { };
|
||||
Request *_req_ptr { };
|
||||
|
||||
NONCOPYABLE(Crypto_channel);
|
||||
|
||||
void _generated_req_completed(State_uint) override;
|
||||
|
||||
void _request_submitted(Module_request &) override;
|
||||
|
||||
bool _request_complete() override { return _state == REQ_COMPLETE; }
|
||||
|
||||
template <typename REQUEST, typename... ARGS>
|
||||
void _generate_req(State_uint state, bool &progress, ARGS &&... args)
|
||||
{
|
||||
_state = REQ_GENERATED;
|
||||
generate_req<REQUEST>(state, progress, args..., _generated_req_success);
|
||||
}
|
||||
|
||||
void _add_key(bool &);
|
||||
|
||||
void _remove_key(bool &);
|
||||
|
||||
void _decrypt(bool &);
|
||||
|
||||
void _encrypt(bool &);
|
||||
|
||||
void _encrypt_client_data(bool &);
|
||||
|
||||
void _decrypt_client_data(bool &);
|
||||
|
||||
void _mark_req_failed(bool &, char const *);
|
||||
|
||||
void _mark_req_successful(bool &);
|
||||
|
||||
Constructible<Key_directory> &_key_dir(Key_id key_id);
|
||||
|
||||
public:
|
||||
|
||||
Crypto_request const &request() const { return _request; }
|
||||
Crypto_channel(Module_channel_id, Vfs::Env &, Xml_node const &);
|
||||
|
||||
void execute(bool &);
|
||||
};
|
||||
|
||||
class Tresor::Crypto : public Module
|
||||
@ -127,79 +136,40 @@ class Tresor::Crypto : public Module
|
||||
|
||||
using Request = Crypto_request;
|
||||
using Channel = Crypto_channel;
|
||||
using Write_result = Vfs::File_io_service::Write_result;
|
||||
using Read_result = Vfs::File_io_service::Read_result;
|
||||
|
||||
enum { NR_OF_CHANNELS = 4 };
|
||||
Constructible<Channel> _channels[1] { };
|
||||
|
||||
struct Key_directory
|
||||
{
|
||||
Vfs::Vfs_handle *encrypt_handle { nullptr };
|
||||
Vfs::Vfs_handle *decrypt_handle { nullptr };
|
||||
uint32_t key_id { 0 };
|
||||
};
|
||||
|
||||
Vfs::Env &_vfs_env;
|
||||
String<32> const _path;
|
||||
Vfs::Vfs_handle &_add_key_handle;
|
||||
Vfs::Vfs_handle &_remove_key_handle;
|
||||
Channel _channels[NR_OF_CHANNELS] { };
|
||||
Key_directory _key_dirs[2] { };
|
||||
|
||||
Key_directory &_lookup_key_dir(uint32_t key_id);
|
||||
|
||||
void _execute_add_key(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
void _execute_remove_key(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
void _execute_decrypt(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
void _execute_encrypt(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
void _execute_encrypt_client_data(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
void _execute_decrypt_client_data(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
void _mark_req_failed(Channel &channel,
|
||||
bool &progress,
|
||||
char const *str);
|
||||
|
||||
void _mark_req_successful(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
|
||||
/************
|
||||
** Module **
|
||||
************/
|
||||
|
||||
bool ready_to_submit_request() override;
|
||||
|
||||
void submit_request(Module_request &req) override;
|
||||
|
||||
bool _peek_completed_request(uint8_t *buf_ptr,
|
||||
size_t buf_size) override;
|
||||
|
||||
void _drop_completed_request(Module_request &req) override;
|
||||
|
||||
void execute(bool &) override;
|
||||
|
||||
bool _peek_generated_request(uint8_t *buf_ptr,
|
||||
size_t buf_size) override;
|
||||
|
||||
void _drop_generated_request(Module_request &mod_req) override;
|
||||
|
||||
void generated_request_complete(Module_request &req) override;
|
||||
NONCOPYABLE(Crypto);
|
||||
|
||||
public:
|
||||
|
||||
Crypto(Vfs::Env &vfs_env,
|
||||
Xml_node const &xml_node);
|
||||
struct Add_key : Request
|
||||
{
|
||||
Add_key(Module_id src_mod, Module_channel_id src_chan, Key &key, bool &succ)
|
||||
: Request(src_mod, src_chan, Request::ADD_KEY, 0, 0, key.id, key.value, 0, 0, *(Block*)0, succ) { }
|
||||
};
|
||||
|
||||
struct Remove_key : Request
|
||||
{
|
||||
Remove_key(Module_id src_mod, Module_channel_id src_chan, Key_id key, bool &succ)
|
||||
: Request(src_mod, src_chan, Request::REMOVE_KEY, 0, 0, key, *(Key_value*)0, 0, 0, *(Block*)0, succ) { }
|
||||
};
|
||||
|
||||
struct Decrypt : Request
|
||||
{
|
||||
Decrypt(Module_id src_mod, Module_channel_id src_chan, Key_id key, Physical_block_address pba, Block &blk, bool &succ)
|
||||
: Request(src_mod, src_chan, Request::DECRYPT, 0, 0, key, *(Key_value*)0, pba, 0, blk, succ) { }
|
||||
};
|
||||
|
||||
struct Encrypt : Request
|
||||
{
|
||||
Encrypt(Module_id src_mod, Module_channel_id src_chan, Key_id key, Physical_block_address pba, Block &blk, bool &succ)
|
||||
: Request(src_mod, src_chan, Request::ENCRYPT, 0, 0, key, *(Key_value*)0, pba, 0, blk, succ) { }
|
||||
};
|
||||
|
||||
Crypto(Vfs::Env &, Xml_node const &);
|
||||
|
||||
void execute(bool &) override;
|
||||
};
|
||||
|
||||
#endif /* _TRESOR__CRYPTO_H_ */
|
||||
|
232
repos/gems/src/lib/tresor/include/tresor/file.h
Normal file
@ -0,0 +1,232 @@
|
||||
/*
|
||||
* \brief Tresor-local utilities for accessing VFS files
|
||||
* \author Martin Stein
|
||||
* \date 2020-10-29
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2023 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _TRESOR__FILE_H_
|
||||
#define _TRESOR__FILE_H_
|
||||
|
||||
/* base includes */
|
||||
#include <util/string.h>
|
||||
|
||||
/* os includes */
|
||||
#include <vfs/vfs_handle.h>
|
||||
#include <vfs/simple_env.h>
|
||||
|
||||
/* tresor includes */
|
||||
#include <tresor/assertion.h>
|
||||
|
||||
namespace Tresor {
|
||||
|
||||
using namespace Genode;
|
||||
|
||||
using Path = String<128>;
|
||||
|
||||
template <typename> class File;
|
||||
template <typename> class Read_write_file;
|
||||
template <typename> class Write_only_file;
|
||||
}
|
||||
|
||||
template <typename HOST_STATE>
|
||||
class Tresor::File
|
||||
{
|
||||
private:
|
||||
|
||||
using Read_result = Vfs::File_io_service::Read_result;
|
||||
using Write_result = Vfs::File_io_service::Write_result;
|
||||
using Sync_result = Vfs::File_io_service::Sync_result;
|
||||
using Open_result = Vfs::Directory_service::Open_result;
|
||||
|
||||
enum State { IDLE, SYNC_QUEUED, READ_QUEUED, READ_INITIALIZED, WRITE_INITIALIZED, WRITE_OFFSET_APPLIED };
|
||||
|
||||
Vfs::Env &_env;
|
||||
HOST_STATE &_host_state;
|
||||
State _state { IDLE };
|
||||
Vfs::Vfs_handle &_handle;
|
||||
Vfs::file_size _num_processed_bytes { 0 };
|
||||
|
||||
Vfs::Vfs_handle &_open(Tresor::Path path, Vfs::Directory_service::Open_mode mode)
|
||||
{
|
||||
Vfs::Vfs_handle *handle { nullptr };
|
||||
ASSERT(_env.root_dir().open(path.string(), mode, &handle, _env.alloc()) == Open_result::OPEN_OK);
|
||||
return *handle;
|
||||
}
|
||||
|
||||
public:
|
||||
|
||||
File(HOST_STATE &host_state, Vfs::Vfs_handle &handle) : _host_state { host_state }, _handle { handle } { }
|
||||
|
||||
File(HOST_STATE &host_state, Vfs::Env &env, Tresor::Path path, Vfs::Directory_service::Open_mode mode)
|
||||
: _env { env }, _host_state { host_state }, _handle { _open(path, mode) } { }
|
||||
|
||||
~File()
|
||||
{
|
||||
ASSERT(_state == IDLE);
|
||||
_env.root_dir().close(&_handle);
|
||||
}
|
||||
|
||||
void read(HOST_STATE succeeded, HOST_STATE failed, Vfs::file_offset off, Byte_range_ptr dst, bool &progress)
|
||||
{
|
||||
switch (_state) {
|
||||
case IDLE:
|
||||
|
||||
_num_processed_bytes = 0;
|
||||
_state = READ_INITIALIZED;
|
||||
progress = true;
|
||||
break;
|
||||
|
||||
case READ_INITIALIZED:
|
||||
|
||||
_handle.seek(off + _num_processed_bytes);
|
||||
if (!_handle.fs().queue_read(&_handle, dst.num_bytes - _num_processed_bytes))
|
||||
break;
|
||||
|
||||
_state = READ_QUEUED;
|
||||
progress = true;
|
||||
break;
|
||||
|
||||
case READ_QUEUED:
|
||||
{
|
||||
size_t num_read_bytes { 0 };
|
||||
Byte_range_ptr curr_dst { dst.start + _num_processed_bytes, dst.num_bytes - _num_processed_bytes };
|
||||
switch (_handle.fs().complete_read(&_handle, curr_dst, num_read_bytes)) {
|
||||
case Read_result::READ_QUEUED:
|
||||
case Read_result::READ_ERR_WOULD_BLOCK: break;
|
||||
case Read_result::READ_OK:
|
||||
|
||||
_num_processed_bytes += num_read_bytes;
|
||||
if (_num_processed_bytes < dst.num_bytes) {
|
||||
_state = READ_INITIALIZED;
|
||||
progress = true;
|
||||
break;
|
||||
}
|
||||
ASSERT(_num_processed_bytes == dst.num_bytes);
|
||||
_state = IDLE;
|
||||
_host_state = succeeded;
|
||||
progress = true;
|
||||
break;
|
||||
|
||||
default:
|
||||
|
||||
error("read failed");
|
||||
_host_state = failed;
|
||||
_state = IDLE;
|
||||
progress = true;
|
||||
break;
|
||||
}
|
||||
break;
|
||||
}
|
||||
default: ASSERT_NEVER_REACHED;
|
||||
}
|
||||
}
|
||||
|
||||
void write(HOST_STATE succeeded, HOST_STATE failed, Vfs::file_offset off, Const_byte_range_ptr src, bool &progress)
|
||||
{
|
||||
switch (_state) {
|
||||
case IDLE:
|
||||
|
||||
_num_processed_bytes = 0;
|
||||
_state = WRITE_INITIALIZED;
|
||||
progress = true;
|
||||
break;
|
||||
|
||||
case WRITE_INITIALIZED:
|
||||
|
||||
_handle.seek(off + _num_processed_bytes);
|
||||
_state = WRITE_OFFSET_APPLIED;
|
||||
progress = true;
|
||||
break;
|
||||
|
||||
case WRITE_OFFSET_APPLIED:
|
||||
{
|
||||
size_t num_written_bytes { 0 };
|
||||
Const_byte_range_ptr curr_src { src.start + _num_processed_bytes, src.num_bytes - _num_processed_bytes };
|
||||
switch (_handle.fs().write(&_handle, curr_src, num_written_bytes)) {
|
||||
case Write_result::WRITE_ERR_WOULD_BLOCK: break;
|
||||
case Write_result::WRITE_OK:
|
||||
|
||||
_num_processed_bytes += num_written_bytes;
|
||||
if (_num_processed_bytes < src.num_bytes) {
|
||||
_state = WRITE_INITIALIZED;
|
||||
progress = true;
|
||||
break;
|
||||
}
|
||||
ASSERT(_num_processed_bytes == src.num_bytes);
|
||||
_state = IDLE;
|
||||
_host_state = succeeded;
|
||||
progress = true;
|
||||
break;
|
||||
|
||||
default:
|
||||
|
||||
error("write failed");
|
||||
_host_state = failed;
|
||||
_state = IDLE;
|
||||
progress = true;
|
||||
break;
|
||||
}
|
||||
break;
|
||||
}
|
||||
default: ASSERT_NEVER_REACHED;
|
||||
}
|
||||
}
|
||||
|
||||
void sync(HOST_STATE succeeded, HOST_STATE failed, bool &progress)
|
||||
{
|
||||
switch (_state) {
|
||||
case IDLE:
|
||||
|
||||
if (!_handle.fs().queue_sync(&_handle))
|
||||
break;
|
||||
|
||||
_state = SYNC_QUEUED;
|
||||
progress = true;
|
||||
break;
|
||||
|
||||
case SYNC_QUEUED:
|
||||
|
||||
switch (_handle.fs().complete_sync(&_handle)) {
|
||||
case Sync_result::SYNC_QUEUED: break;
|
||||
case Sync_result::SYNC_OK:
|
||||
|
||||
_state = IDLE;
|
||||
_host_state = succeeded;
|
||||
progress = true;
|
||||
break;
|
||||
|
||||
default:
|
||||
|
||||
error("sync failed");
|
||||
_host_state = failed;
|
||||
_state = IDLE;
|
||||
progress = true;
|
||||
break;
|
||||
}
|
||||
default: break;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
template <typename HOST_STATE>
|
||||
struct Tresor::Read_write_file : public File<HOST_STATE>
|
||||
{
|
||||
Read_write_file(HOST_STATE &host_state, Vfs::Env &env, Tresor::Path path)
|
||||
: File<HOST_STATE> { host_state, env, path, Vfs::Directory_service::OPEN_MODE_RDWR } { }
|
||||
};
|
||||
|
||||
template <typename HOST_STATE>
|
||||
struct Tresor::Write_only_file : public File<HOST_STATE>
|
||||
{
|
||||
Write_only_file(HOST_STATE &host_state, Vfs::Env &env, Tresor::Path path)
|
||||
: File<HOST_STATE> { host_state, env, path, Vfs::Directory_service::OPEN_MODE_WRONLY } { }
|
||||
};
|
||||
|
||||
#endif /* _TRESOR__FILE_H_ */
|
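File::read, write and sync above each advance a small per-operation state machine (for example IDLE, READ_INITIALIZED, READ_QUEUED) and finish by assigning one of two caller-supplied host states. The sketch below shows, with heavily simplified stand-ins (Stub_file, Channel) rather than the real Read_write_file interface, how a channel keeps calling such an operation from its execute step until the host state flips.

#include <cstdio>

/* simplified stand-ins: a host state and a file whose read needs two steps */
enum State { READ_BLK, READ_OK, READ_FAILED, DONE };

struct Stub_file
{
    int _steps_left { 2 };

    /* mimics File::read: reports progress until it flips the host state */
    void read(State ok, State, State &host_state, bool &progress)
    {
        if (_steps_left-- > 0) { progress = true; return; }
        host_state = ok;
        progress   = true;
    }
};

struct Channel
{
    State     _state { READ_BLK };
    Stub_file _file  { };

    void execute(bool &progress)
    {
        switch (_state) {
        case READ_BLK: _file.read(READ_OK, READ_FAILED, _state, progress); break;
        case READ_OK:  _state = DONE; progress = true; break;
        default: break;
        }
    }
};

int main()
{
    Channel chan { };
    bool progress { true };
    while (progress && chan._state != DONE) {   /* drive until nothing moves */
        progress = false;
        chan.execute(progress);
    }
    std::printf("final state %d\n", chan._state);
    return 0;
}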
@ -16,8 +16,6 @@
|
||||
|
||||
/* tresor includes */
|
||||
#include <tresor/types.h>
|
||||
#include <tresor/module.h>
|
||||
#include <tresor/sha256_4k_hash.h>
|
||||
|
||||
namespace Tresor {
|
||||
|
||||
@ -28,470 +26,150 @@ namespace Tresor {
|
||||
|
||||
class Tresor::Free_tree_request : public Module_request
|
||||
{
|
||||
friend class Free_tree_channel;
|
||||
|
||||
public:
|
||||
|
||||
enum Type {
|
||||
INVALID = 0, ALLOC_FOR_NON_RKG = 1, ALLOC_FOR_RKG_CURR_GEN_BLKS = 2,
|
||||
ALLOC_FOR_RKG_OLD_GEN_BLKS = 3 };
|
||||
enum Type { ALLOC_FOR_NON_RKG, ALLOC_FOR_RKG_CURR_GEN_BLKS, ALLOC_FOR_RKG_OLD_GEN_BLKS, EXTENSION_STEP };
|
||||
|
||||
private:
|
||||
|
||||
friend class Free_tree;
|
||||
friend class Free_tree_channel;
|
||||
Type const _type;
|
||||
Tree_root &_ft;
|
||||
Tree_root &_mt;
|
||||
Generation const _curr_gen;
|
||||
Generation const _free_gen;
|
||||
Number_of_blocks const _num_required_pbas;
|
||||
Tree_walk_pbas &_new_blocks;
|
||||
Type_1_node_walk const &_old_blocks;
|
||||
Tree_level_index const _max_lvl;
|
||||
Virtual_block_address const _vba;
|
||||
Tree_degree const _vbd_degree;
|
||||
Virtual_block_address const _vbd_max_vba;
|
||||
bool const _rekeying;
|
||||
Key_id const _prev_key_id;
|
||||
Key_id const _curr_key_id;
|
||||
Virtual_block_address const _rekeying_vba;
|
||||
bool &_success;
|
||||
Snapshots const &_snapshots;
|
||||
Generation const _last_secured_gen;
|
||||
Physical_block_address &_pba;
|
||||
Number_of_blocks &_num_pbas;
|
||||
|
||||
Type _type { INVALID };
|
||||
addr_t _ft_root_pba_ptr { 0 };
|
||||
addr_t _ft_root_gen_ptr { 0 };
|
||||
addr_t _ft_root_hash_ptr { 0 };
|
||||
uint64_t _ft_max_level { 0 };
|
||||
uint64_t _ft_degree { 0 };
|
||||
uint64_t _ft_leaves { 0 };
|
||||
addr_t _mt_root_pba_ptr { 0 };
|
||||
addr_t _mt_root_gen_ptr { 0 };
|
||||
addr_t _mt_root_hash_ptr { 0 };
|
||||
uint64_t _mt_max_level { 0 };
|
||||
uint64_t _mt_degree { 0 };
|
||||
uint64_t _mt_leaves { 0 };
|
||||
uint64_t _current_gen { 0 };
|
||||
uint64_t _free_gen { 0 };
|
||||
uint64_t _requested_blocks { 0 };
|
||||
addr_t _new_blocks_ptr { 0 };
|
||||
addr_t _old_blocks_ptr { 0 };
|
||||
uint64_t _max_level { 0 };
|
||||
uint64_t _vba { INVALID_VBA };
|
||||
uint64_t _vbd_degree { 0 };
|
||||
uint64_t _vbd_highest_vba { 0 };
|
||||
bool _rekeying { 0 };
|
||||
uint32_t _previous_key_id { 0 };
|
||||
uint32_t _current_key_id { 0 };
|
||||
uint64_t _rekeying_vba { 0 };
|
||||
bool _success { false };
|
||||
addr_t _snapshots_ptr { 0 };
|
||||
Generation _last_secured_generation { INVALID_GENERATION };
|
||||
NONCOPYABLE(Free_tree_request);
|
||||
|
||||
public:
|
||||
|
||||
Free_tree_request() { }
|
||||
Free_tree_request(Module_id, Module_channel_id, Type, Tree_root &, Tree_root &, Snapshots const &,
|
||||
Generation, Generation, Generation, Number_of_blocks, Tree_walk_pbas &, Type_1_node_walk const &,
|
||||
Tree_level_index, Virtual_block_address, Tree_degree, Virtual_block_address,
|
||||
bool, Key_id, Key_id, Virtual_block_address, Physical_block_address &, Number_of_blocks &, bool &);
|
||||
|
||||
Free_tree_request(uint64_t src_module_id,
|
||||
uint64_t src_request_id,
|
||||
size_t req_type,
|
||||
addr_t ft_root_pba_ptr,
|
||||
addr_t ft_root_gen_ptr,
|
||||
addr_t ft_root_hash_ptr,
|
||||
uint64_t ft_max_level,
|
||||
uint64_t ft_degree,
|
||||
uint64_t ft_leaves,
|
||||
addr_t mt_root_pba_ptr,
|
||||
addr_t mt_root_gen_ptr,
|
||||
addr_t mt_root_hash_ptr,
|
||||
uint64_t mt_max_level,
|
||||
uint64_t mt_degree,
|
||||
uint64_t mt_leaves,
|
||||
Snapshots const *snapshots,
|
||||
Generation last_secured_generation,
|
||||
uint64_t current_gen,
|
||||
uint64_t free_gen,
|
||||
uint64_t requested_blocks,
|
||||
addr_t new_blocks_ptr,
|
||||
addr_t old_blocks_ptr,
|
||||
uint64_t max_level,
|
||||
uint64_t vba,
|
||||
uint64_t vbd_degree,
|
||||
uint64_t vbd_highest_vba,
|
||||
bool rekeying,
|
||||
uint32_t previous_key_id,
|
||||
uint32_t current_key_id,
|
||||
uint64_t rekeying_vba);
|
||||
|
||||
Type type() const { return _type; }
|
||||
|
||||
bool success() const { return _success; }
|
||||
|
||||
static char const *type_to_string(Type type);
|
||||
|
||||
char const *type_name() const { return type_to_string(_type); }
|
||||
|
||||
|
||||
/********************
|
||||
** Module_request **
|
||||
********************/
|
||||
static char const *type_to_string(Type);
|
||||
|
||||
void print(Output &out) const override { Genode::print(out, type_to_string(_type)); }
|
||||
};
|
||||
|
||||
|
||||
class Tresor::Free_tree_channel
|
||||
class Tresor::Free_tree_channel : public Module_channel
|
||||
{
|
||||
private:
|
||||
|
||||
friend class Free_tree;
|
||||
|
||||
using Request = Free_tree_request;
|
||||
|
||||
enum State {
|
||||
INVALID,
|
||||
SCAN,
|
||||
SCAN_COMPLETE,
|
||||
UPDATE,
|
||||
UPDATE_COMPLETE,
|
||||
COMPLETE,
|
||||
NOT_ENOUGH_FREE_BLOCKS,
|
||||
TREE_HASH_MISMATCH
|
||||
};
|
||||
REQ_SUBMITTED, REQ_GENERATED, SEEK_DOWN, SEEK_LEFT_OR_UP, WRITE_BLK, READ_BLK_SUCCEEDED,
|
||||
ALLOC_PBA_SUCCEEDED, WRITE_BLK_SUCCEEDED, REQ_COMPLETE };
|
||||
|
||||
struct Type_1_info
|
||||
Request *_req_ptr { nullptr };
|
||||
State _state { REQ_COMPLETE };
|
||||
Virtual_block_address _vba { };
|
||||
Tree_walk_pbas _old_pbas { };
|
||||
Tree_walk_pbas _new_pbas { };
|
||||
Tree_walk_generations _old_generations { };
|
||||
Number_of_leaves _num_leaves { 0 };
|
||||
Physical_block_address _alloc_pba { 0 };
|
||||
Tree_level_index _alloc_lvl { 0 };
|
||||
Number_of_blocks _num_pbas { 0 };
|
||||
Block _blk { };
|
||||
Tree_node_index _node_idx[TREE_MAX_NR_OF_LEVELS] { };
|
||||
bool _apply_allocation { false };
|
||||
Type_1_node_block _t1_blks[TREE_MAX_NR_OF_LEVELS] { };
|
||||
Type_2_node_block _t2_blk { };
|
||||
Tree_degree_log_2 _vbd_degree_log_2 { 0 };
|
||||
Tree_level_index _lvl { 0 };
|
||||
bool _generated_req_success { false };
|
||||
|
||||
NONCOPYABLE(Free_tree_channel);
|
||||
|
||||
void _generated_req_completed(State_uint) override;
|
||||
|
||||
template <typename REQUEST, typename... ARGS>
|
||||
void _generate_req(State_uint state, bool &progress, ARGS &&... args)
|
||||
{
|
||||
enum State {
|
||||
INVALID, AVAILABLE, READ, WRITE, COMPLETE };
|
||||
|
||||
State state { INVALID };
|
||||
Type_1_node node { };
|
||||
Tree_node_index index { INVALID_NODE_INDEX };
|
||||
bool volatil { false };
|
||||
};
|
||||
|
||||
struct Type_2_info
|
||||
{
|
||||
enum State {
|
||||
INVALID, AVAILABLE, READ, WRITE, COMPLETE };
|
||||
|
||||
State state { INVALID };
|
||||
Type_2_node node { };
|
||||
Tree_node_index index { INVALID_NODE_INDEX };
|
||||
};
|
||||
|
||||
struct Local_cache_request
|
||||
{
|
||||
enum State { INVALID, PENDING, IN_PROGRESS, COMPLETE };
|
||||
enum Op { READ, WRITE, SYNC };
|
||||
|
||||
State state { INVALID };
|
||||
Op op { READ };
|
||||
bool success { false };
|
||||
uint64_t pba { 0 };
|
||||
uint64_t level { 0 };
|
||||
};
|
||||
|
||||
struct Local_meta_tree_request
|
||||
{
|
||||
enum State { INVALID, PENDING, IN_PROGRESS, COMPLETE };
|
||||
enum Op { READ, WRITE, SYNC };
|
||||
|
||||
State state { INVALID };
|
||||
Op op { READ };
|
||||
uint64_t pba { 0 };
|
||||
};
|
||||
|
||||
class Type_1_info_stack {
|
||||
|
||||
private:
|
||||
|
||||
enum { MIN = 1, MAX = TREE_MAX_DEGREE, };
|
||||
|
||||
Type_1_info _container[MAX + 1] { };
|
||||
uint64_t _top { MIN - 1 };
|
||||
|
||||
public:
|
||||
|
||||
bool empty() const { return _top < MIN; }
|
||||
|
||||
bool full() const { return _top >= MAX; }
|
||||
|
||||
Type_1_info peek_top() const
|
||||
{
|
||||
if (empty()) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
return _container[_top];
|
||||
}
|
||||
|
||||
void reset() { _top = MIN - 1; }
|
||||
|
||||
void pop()
|
||||
{
|
||||
if (empty()) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
_top--;
|
||||
}
|
||||
|
||||
void push(Type_1_info val)
|
||||
{
|
||||
if (full()) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
_top++;
|
||||
_container[_top] = val;
|
||||
}
|
||||
|
||||
void update_top(Type_1_info val)
|
||||
{
|
||||
if (empty()) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
_container[_top] = val;
|
||||
}
|
||||
};
|
||||
|
||||
class Type_2_info_stack {
|
||||
|
||||
private:
|
||||
|
||||
enum { MIN = 1, MAX = TREE_MAX_DEGREE, };
|
||||
|
||||
Type_2_info _container[MAX + 1] { };
|
||||
uint64_t _top { MIN - 1 };
|
||||
|
||||
public:
|
||||
|
||||
bool empty() const { return _top < MIN; }
|
||||
|
||||
bool full() const { return _top >= MAX; }
|
||||
|
||||
Type_2_info peek_top() const
|
||||
{
|
||||
if (empty()) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
return _container[_top];
|
||||
}
|
||||
|
||||
void reset() { _top = MIN - 1; }
|
||||
|
||||
void pop()
|
||||
{
|
||||
if (empty()) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
_top--;
|
||||
}
|
||||
|
||||
void push(Type_2_info val)
|
||||
{
|
||||
if (full()) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
_top++;
|
||||
_container[_top] = val;
|
||||
}
|
||||
|
||||
void update_top(Type_2_info val)
|
||||
{
|
||||
if (empty()) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
_container[_top] = val;
|
||||
}
|
||||
};
|
||||
|
||||
class Node_queue
|
||||
{
|
||||
private:
|
||||
|
||||
enum {
|
||||
FIRST_CONTAINER_IDX = 1,
|
||||
MAX_CONTAINER_IDX = TREE_MAX_DEGREE,
|
||||
MAX_USED_VALUE = TREE_MAX_DEGREE - 1,
|
||||
FIRST_USED_VALUE = 0,
|
||||
};
|
||||
|
||||
uint64_t _head { FIRST_CONTAINER_IDX };
|
||||
uint64_t _tail { FIRST_CONTAINER_IDX };
|
||||
Type_2_info _container[MAX_CONTAINER_IDX + 1] { };
|
||||
uint64_t _used { FIRST_USED_VALUE };
|
||||
|
||||
public:
|
||||
|
||||
void enqueue(Type_2_info const &node)
|
||||
{
|
||||
_container[_tail] = node;
|
||||
if (_tail < MAX_CONTAINER_IDX)
|
||||
_tail++;
|
||||
else
|
||||
_tail = FIRST_CONTAINER_IDX;
|
||||
|
||||
_used++;
|
||||
}
|
||||
|
||||
void dequeue_head()
|
||||
{
|
||||
if (_head < MAX_CONTAINER_IDX)
|
||||
_head++;
|
||||
else
|
||||
_head = FIRST_CONTAINER_IDX;
|
||||
|
||||
_used--;
|
||||
}
|
||||
|
||||
Type_2_info const &head() const { return _container[_head]; }
|
||||
|
||||
bool empty() const { return _used == FIRST_USED_VALUE; };
|
||||
|
||||
bool full() const { return _used == MAX_USED_VALUE; };
|
||||
};
|
||||
|
||||
State _state { INVALID };
|
||||
Request _request { };
|
||||
uint64_t _needed_blocks { 0 };
|
||||
uint64_t _found_blocks { 0 };
|
||||
uint64_t _exchanged_blocks { 0 };
|
||||
Local_meta_tree_request _meta_tree_request { };
|
||||
Local_cache_request _cache_request { };
|
||||
Block _cache_block_data { };
|
||||
Type_1_info_stack _level_n_stacks[TREE_MAX_NR_OF_LEVELS] { };
|
||||
Type_2_info_stack _level_0_stack { };
|
||||
Type_1_node_block _level_n_nodes[TREE_MAX_NR_OF_LEVELS] { };
|
||||
Type_1_node_block _level_n_node { };
|
||||
Type_2_node_block _level_0_node { };
|
||||
Node_queue _type_2_leafs { };
|
||||
uint64_t _vbd_degree_log_2 { 0 };
|
||||
bool _wb_data_prim_success { false };
|
||||
|
||||
Type_1_node _root_node() const
|
||||
{
|
||||
Type_1_node node { };
|
||||
node.pba = *(Physical_block_address *)_request._ft_root_pba_ptr;
|
||||
node.gen = *(Generation *)_request._ft_root_gen_ptr;
|
||||
memcpy(&node.hash, (void *)_request._ft_root_hash_ptr, HASH_SIZE);
|
||||
return node;
|
||||
_state = REQ_GENERATED;
|
||||
generate_req<REQUEST>(state, progress, args..., _generated_req_success);
|
||||
}
|
||||
|
||||
void _request_submitted(Module_request &) override;
|
||||
|
||||
bool _request_complete() override { return _state == REQ_COMPLETE; }
|
||||
|
||||
void _mark_req_failed(bool &, char const *);
|
||||
|
||||
bool _can_alloc_pba_of(Type_2_node &);
|
||||
|
||||
void _alloc_pba_of(Type_2_node &);
|
||||
|
||||
void _traverse_curr_node(bool &);
|
||||
|
||||
void _mark_req_successful(bool &);
|
||||
|
||||
void _start_tree_traversal(bool &);
|
||||
|
||||
void _advance_to_next_node();
|
||||
|
||||
void _add_new_branch_at(Tree_level_index, Tree_node_index);
|
||||
|
||||
void _add_new_root_lvl();
|
||||
|
||||
void _generate_write_blk_req(bool &);
|
||||
|
||||
void _extension_step(bool &);
|
||||
|
||||
void _alloc_pbas(bool &);
|
||||
|
||||
public:
|
||||
|
||||
Free_tree_channel(Module_channel_id id) : Module_channel { FREE_TREE, id } { }
|
||||
|
||||
void execute(bool &);
|
||||
};
|
||||
|
||||
class Tresor::Free_tree : public Module
|
||||
{
|
||||
private:
|
||||
|
||||
using Request = Free_tree_request;
|
||||
using Channel = Free_tree_channel;
|
||||
using Local_cache_request = Channel::Local_cache_request;
|
||||
using Local_meta_tree_request = Channel::Local_meta_tree_request;
|
||||
using Type_1_info = Channel::Type_1_info;
|
||||
using Type_2_info = Channel::Type_2_info;
|
||||
using Type_1_info_stack = Channel::Type_1_info_stack;
|
||||
using Type_2_info_stack = Channel::Type_2_info_stack;
|
||||
using Node_queue = Channel::Node_queue;
|
||||
using Request = Free_tree_request;
|
||||
|
||||
enum { FIRST_LVL_N_STACKS_IDX = 1 };
|
||||
enum { MAX_LVL_N_STACKS_IDX = TREE_MAX_LEVEL };
|
||||
enum { FIRST_LVL_N_NODES_IDX = 1 };
|
||||
enum { NR_OF_CHANNELS = 1 };
|
||||
Constructible<Channel> _channels[1] { };
|
||||
|
||||
Channel _channels[NR_OF_CHANNELS] { };
|
||||
|
||||
void _reset_block_state(Channel &chan);
|
||||
|
||||
static Local_meta_tree_request
|
||||
_new_meta_tree_request(Physical_block_address pba);
|
||||
|
||||
void _update_upper_n_stack(Type_1_info const &t,
|
||||
Generation gen,
|
||||
Block const &block_data,
|
||||
Type_1_node_block &entries);
|
||||
|
||||
void _mark_req_failed(Channel &chan,
|
||||
bool &progress,
|
||||
char const *str);
|
||||
|
||||
void _mark_req_successful(Channel &chan,
|
||||
bool &progress);
|
||||
|
||||
void
|
||||
_exchange_type_2_leaves(Generation free_gen,
|
||||
Tree_level_index max_level,
|
||||
Type_1_node_walk const &old_blocks,
|
||||
Tree_walk_pbas &new_blocks,
|
||||
Virtual_block_address vba,
|
||||
Tree_degree_log_2 vbd_degree_log_2,
|
||||
Request::Type req_type,
|
||||
Type_2_info_stack &stack,
|
||||
Type_2_node_block &entries,
|
||||
Number_of_blocks &exchanged,
|
||||
bool &handled,
|
||||
Virtual_block_address vbd_highest_vba,
|
||||
bool rekeying,
|
||||
Key_id previous_key_id,
|
||||
Key_id current_key_id,
|
||||
Virtual_block_address rekeying_vba);
|
||||
|
||||
void _populate_lower_n_stack(Type_1_info_stack &stack,
|
||||
Type_1_node_block &entries,
|
||||
Block const &block_data,
|
||||
Generation current_gen);
|
||||
|
||||
bool
|
||||
_check_type_2_leaf_usable(Snapshots const &snapshots,
|
||||
Generation last_secured_gen,
|
||||
Type_2_node const &node,
|
||||
bool rekeying,
|
||||
Key_id previous_key_id,
|
||||
Virtual_block_address rekeying_vba);
|
||||
|
||||
void _populate_level_0_stack(Type_2_info_stack &stack,
|
||||
Type_2_node_block &entries,
|
||||
Block const &block_data,
|
||||
Snapshots const &active_snaps,
|
||||
Generation secured_gen,
|
||||
bool rekeying,
|
||||
Key_id previous_key_id,
|
||||
Virtual_block_address rekeying_vba);
|
||||
|
||||
void _execute_update(Channel &chan,
|
||||
Snapshots const &active_snaps,
|
||||
Generation last_secured_gen,
|
||||
bool &progress);
|
||||
|
||||
bool _node_volatile(Type_1_node const &node,
|
||||
uint64_t gen);
|
||||
|
||||
void _execute_scan(Channel &chan,
|
||||
Snapshots const &active_snaps,
|
||||
Generation last_secured_gen,
|
||||
bool &progress);
|
||||
|
||||
void _execute(Channel &chan,
|
||||
Snapshots const &active_snaps,
|
||||
Generation last_secured_gen,
|
||||
bool &progress);
|
||||
|
||||
void _check_type_2_stack(Type_2_info_stack &stack,
|
||||
Type_1_info_stack &stack_next,
|
||||
Node_queue &leaves,
|
||||
Number_of_blocks &found);
|
||||
|
||||
Local_cache_request _new_cache_request(Physical_block_address pba,
|
||||
Local_cache_request::Op op,
|
||||
Tree_level_index lvl);
|
||||
|
||||
/************
|
||||
** Module **
|
||||
************/
|
||||
|
||||
bool ready_to_submit_request() override;
|
||||
|
||||
void submit_request(Module_request &req) override;
|
||||
|
||||
bool _peek_completed_request(uint8_t *buf_ptr,
|
||||
size_t buf_size) override;
|
||||
|
||||
void _drop_completed_request(Module_request &req) override;
|
||||
NONCOPYABLE(Free_tree);
|
||||
|
||||
void execute(bool &) override;
|
||||
|
||||
bool _peek_generated_request(uint8_t *buf_ptr,
|
||||
size_t buf_size) override;
|
||||
public:
|
||||
|
||||
void _drop_generated_request(Module_request &mod_req) override;
|
||||
struct Extension_step : Request
|
||||
{
|
||||
Extension_step(Module_id mod_id, Module_channel_id chan_id, Generation curr_gen, Tree_root &ft, Tree_root &mt,
|
||||
Physical_block_address &pba, Number_of_blocks &num_pbas, bool &succ)
|
||||
: Request(mod_id, chan_id, Request::EXTENSION_STEP, ft, mt, *(Snapshots *)0, 0, curr_gen, 0, 0, *(Tree_walk_pbas*)0,
|
||||
*(Type_1_node_walk*)0, 0, 0, 0, 0, 0, 0, 0, 0, pba, num_pbas, succ) { }
|
||||
};
|
||||
|
||||
void generated_request_complete(Module_request &req) override;
|
||||
Free_tree();
|
||||
};
|
||||
|
||||
#endif /* _TRESOR__FREE_TREE_H_ */
|
||||
|
@ -14,12 +14,8 @@
|
||||
#ifndef _TRESOR__FT_CHECK_H_
|
||||
#define _TRESOR__FT_CHECK_H_
|
||||
|
||||
/* base includes */
|
||||
#include <base/output.h>
|
||||
|
||||
/* tresor includes */
|
||||
#include <tresor/types.h>
|
||||
#include <tresor/module.h>
|
||||
|
||||
namespace Tresor {
|
||||
|
||||
@ -31,107 +27,66 @@ namespace Tresor {
|
||||
|
||||
class Tresor::Ft_check_request : public Module_request
|
||||
{
|
||||
public:
|
||||
|
||||
enum Type { INVALID = 0, CHECK = 1, };
|
||||
friend class Ft_check_channel;
|
||||
|
||||
private:
|
||||
|
||||
friend class Ft_check;
|
||||
friend class Ft_check_channel;
|
||||
Tree_root const &_ft;
|
||||
bool &_success;
|
||||
|
||||
Type _type { INVALID };
|
||||
Tree_level_index _max_lvl { 0 };
|
||||
Tree_node_index _max_child_idx { 0 };
|
||||
Number_of_leaves _nr_of_leaves { 0 };
|
||||
Type_1_node _root { };
|
||||
bool _success { false };
|
||||
NONCOPYABLE(Ft_check_request);
|
||||
|
||||
public:
|
||||
|
||||
Ft_check_request() { }
|
||||
Ft_check_request(Module_id, Module_channel_id, Tree_root const &, bool &);
|
||||
|
||||
Ft_check_request(uint64_t src_module_id,
|
||||
uint64_t src_request_id,
|
||||
Type type,
|
||||
Tree_level_index max_lvl,
|
||||
Tree_node_index max_child_idx,
|
||||
Number_of_leaves nr_of_leaves,
|
||||
Type_1_node root);
|
||||
|
||||
Type type() const { return _type; }
|
||||
|
||||
bool success() const { return _success; }
|
||||
|
||||
static char const *type_to_string(Type type);
|
||||
|
||||
|
||||
/********************
|
||||
** Module_request **
|
||||
********************/
|
||||
|
||||
void print(Output &out) const override
|
||||
{
|
||||
Genode::print(out, type_to_string(_type), " root ", _root);
|
||||
}
|
||||
void print(Output &out) const override { Genode::print(out, "check ", _ft); }
|
||||
};
|
||||
|
||||
|
||||
class Tresor::Ft_check_channel
|
||||
class Tresor::Ft_check_channel : public Module_channel
|
||||
{
|
||||
private:
|
||||
|
||||
friend class Ft_check;
|
||||
|
||||
using Request = Ft_check_request;
|
||||
|
||||
enum Child_state {
|
||||
READ_BLOCK = 0, CHECK_HASH = 1, DONE = 2 };
|
||||
enum State : State_uint { REQ_SUBMITTED, REQ_IN_PROGRESS, REQ_COMPLETE, REQ_GENERATED, READ_BLK_SUCCEEDED };
|
||||
|
||||
struct Type_1_level
|
||||
State _state { REQ_COMPLETE };
|
||||
Type_1_node_block_walk _t1_blks { };
|
||||
Type_2_node_block _t2_blk { };
|
||||
bool _check_node[TREE_MAX_NR_OF_LEVELS + 1][NUM_NODES_PER_BLK] { };
|
||||
Number_of_leaves _num_remaining_leaves { 0 };
|
||||
Request *_req_ptr { };
|
||||
Block _blk { };
|
||||
bool _generated_req_success { false };
|
||||
|
||||
NONCOPYABLE(Ft_check_channel);
|
||||
|
||||
void _generated_req_completed(State_uint) override;
|
||||
|
||||
void _request_submitted(Module_request &) override;
|
||||
|
||||
bool _request_complete() override { return _state == REQ_COMPLETE; }
|
||||
|
||||
void _mark_req_failed(bool &, Error_string);
|
||||
|
||||
void _mark_req_successful(bool &);
|
||||
|
||||
bool _execute_node(Tree_level_index, Tree_node_index, bool &);
|
||||
|
||||
template <typename REQUEST, typename... ARGS>
|
||||
void _generate_req(State_uint state, bool &progress, ARGS &&... args)
|
||||
{
|
||||
Child_state children_state[NR_OF_T1_NODES_PER_BLK] { };
|
||||
Type_1_node_block children { };
|
||||
_state = REQ_GENERATED;
|
||||
generate_req<REQUEST>(state, progress, args..., _generated_req_success);
|
||||
}
|
||||
|
||||
Type_1_level()
|
||||
{
|
||||
for (Child_state &state : children_state)
|
||||
state = DONE;
|
||||
}
|
||||
};
|
||||
public:
|
||||
|
||||
struct Type_2_level
|
||||
{
|
||||
Child_state children_state[NR_OF_T1_NODES_PER_BLK] { };
|
||||
Type_2_node_block children { };
|
||||
Ft_check_channel(Module_channel_id id) : Module_channel { FT_CHECK, id } { }
|
||||
|
||||
Type_2_level()
|
||||
{
|
||||
for (Child_state &state : children_state)
|
||||
state = DONE;
|
||||
}
|
||||
};
|
||||
|
||||
enum Primitive_tag { INVALID, BLOCK_IO };
|
||||
|
||||
struct Generated_primitive
|
||||
{
|
||||
bool success { false };
|
||||
Primitive_tag tag { INVALID };
|
||||
Physical_block_address blk_nr { 0 };
|
||||
bool dropped { false };
|
||||
|
||||
bool valid() const { return tag != INVALID; }
|
||||
};
|
||||
|
||||
Generated_primitive _gen_prim { };
|
||||
Tree_level_index _lvl_to_read { 0 };
|
||||
Child_state _root_state { DONE };
|
||||
Type_2_level _t2_lvl { };
|
||||
Type_1_level _t1_lvls[TREE_MAX_LEVEL] { };
|
||||
Number_of_leaves _nr_of_leaves { 0 };
|
||||
Request _request { };
|
||||
Block _encoded_blk { };
|
||||
void execute(bool &);
|
||||
};
|
||||
|
||||
|
||||
@ -139,74 +94,17 @@ class Tresor::Ft_check : public Module
|
||||
{
|
||||
private:
|
||||
|
||||
using Request = Ft_check_request;
|
||||
using Channel = Ft_check_channel;
|
||||
using Child_state = Ft_check_channel::Child_state;
|
||||
using Type_1_level = Ft_check_channel::Type_1_level;
|
||||
using Type_2_level = Ft_check_channel::Type_2_level;
|
||||
|
||||
enum { NR_OF_CHANNELS = 1 };
|
||||
|
||||
Channel _channels[NR_OF_CHANNELS] { };
|
||||
|
||||
void _execute_inner_t2_child(Channel &chan,
|
||||
Tree_level_index lvl,
|
||||
Tree_node_index child_idx,
|
||||
bool &progress);
|
||||
|
||||
void _execute_check(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
void _mark_req_failed(Channel &channel,
|
||||
bool &progress,
|
||||
char const *str);
|
||||
|
||||
void _mark_req_successful(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
void _execute_inner_t1_child(Channel &chan,
|
||||
Type_1_node const &child,
|
||||
Type_1_level &child_lvl,
|
||||
Child_state &child_state,
|
||||
Tree_level_index lvl,
|
||||
Tree_node_index child_idx,
|
||||
bool &progress);
|
||||
|
||||
|
||||
void _execute_leaf_child(Channel &chan,
|
||||
Tree_node_index child_idx,
|
||||
bool &progress);
|
||||
|
||||
|
||||
/************
|
||||
** Module **
|
||||
************/
|
||||
|
||||
bool _peek_completed_request(uint8_t *buf_ptr,
|
||||
size_t buf_size) override;
|
||||
|
||||
void _drop_completed_request(Module_request &req) override;
|
||||
|
||||
bool _peek_generated_request(uint8_t *buf_ptr,
|
||||
size_t buf_size) override;
|
||||
|
||||
void _drop_generated_request(Module_request &mod_req) override;
|
||||
|
||||
void generated_request_complete(Module_request &req) override;
|
||||
Constructible<Channel> _channels[1] { };
|
||||
|
||||
NONCOPYABLE(Ft_check);
|
||||
|
||||
public:
|
||||
|
||||
/************
|
||||
** Module **
|
||||
************/
|
||||
|
||||
bool ready_to_submit_request() override;
|
||||
|
||||
void submit_request(Module_request &req) override;
|
||||
Ft_check();
|
||||
|
||||
void execute(bool &) override;
|
||||
|
||||
};
|
||||
|
||||
#endif /* _TRESOR__FT_CHECK_H_ */
|
||||
|
@@ -1,5 +1,6 @@
|
||||
/*
|
||||
* \brief Module for initializing the FT
|
||||
* \author Martin Stein
|
||||
* \author Josef Soentgen
|
||||
* \date 2023-03-09
|
||||
*/
|
||||
@@ -14,11 +15,8 @@
|
||||
#ifndef _TRESOR__FT_INITIALIZER_H_
|
||||
#define _TRESOR__FT_INITIALIZER_H_
|
||||
|
||||
/* base includes */
|
||||
#include <base/output.h>
|
||||
|
||||
/* tresor includes */
|
||||
#include <tresor/module.h>
|
||||
#include <tresor/types.h>
|
||||
|
||||
namespace Tresor {
|
||||
|
||||
@@ -30,146 +28,67 @@ namespace Tresor {
|
||||
|
||||
class Tresor::Ft_initializer_request : public Module_request
|
||||
{
|
||||
public:
|
||||
|
||||
enum Type { INVALID = 0, INIT = 1, };
|
||||
friend class Ft_initializer_channel;
|
||||
|
||||
private:
|
||||
|
||||
friend class Ft_initializer;
|
||||
friend class Ft_initializer_channel;
|
||||
|
||||
Type _type { INVALID };
|
||||
uint8_t _root_node[sizeof(Type_1_node)] { 0 };
|
||||
uint64_t _max_level_idx { 0 };
|
||||
uint64_t _max_child_idx { 0 };
|
||||
uint64_t _nr_of_leaves { 0 };
|
||||
bool _success { false };
|
||||
Tree_root &_ft;
|
||||
Pba_allocator &_pba_alloc;
|
||||
bool &_success;
|
||||
|
||||
NONCOPYABLE(Ft_initializer_request);
|
||||
|
||||
public:
|
||||
|
||||
Ft_initializer_request() { }
|
||||
Ft_initializer_request(Module_id, Module_channel_id, Tree_root &, Pba_allocator &, bool &);
|
||||
|
||||
Ft_initializer_request(Module_id src_module_id,
|
||||
Module_request_id src_request_id);
|
||||
|
||||
static void create(void *buf_ptr,
|
||||
size_t buf_size,
|
||||
uint64_t src_module_id,
|
||||
uint64_t src_request_id,
|
||||
size_t req_type,
|
||||
uint64_t max_level_idx,
|
||||
uint64_t max_child_idx,
|
||||
uint64_t nr_of_leaves);
|
||||
|
||||
void *root_node() { return _root_node; }
|
||||
|
||||
Type type() const { return _type; }
|
||||
|
||||
bool success() const { return _success; }
|
||||
|
||||
static char const *type_to_string(Type type);
|
||||
|
||||
|
||||
/********************
|
||||
** Module_request **
|
||||
********************/
|
||||
|
||||
void print(Output &out) const override { Genode::print(out, type_to_string(_type)); }
|
||||
void print(Output &out) const override { Genode::print(out, "init"); }
|
||||
};
|
||||
|
||||
|
||||
class Tresor::Ft_initializer_channel
|
||||
class Tresor::Ft_initializer_channel : public Module_channel
|
||||
{
|
||||
private:
|
||||
|
||||
friend class Ft_initializer;
|
||||
using Request = Ft_initializer_request;
|
||||
|
||||
enum State {
|
||||
INACTIVE, SUBMITTED, PENDING, IN_PROGRESS, COMPLETE,
|
||||
BLOCK_ALLOC_PENDING,
|
||||
BLOCK_ALLOC_IN_PROGRESS,
|
||||
BLOCK_ALLOC_COMPLETE,
|
||||
BLOCK_IO_PENDING,
|
||||
BLOCK_IO_IN_PROGRESS,
|
||||
BLOCK_IO_COMPLETE,
|
||||
};
|
||||
enum State { REQ_GENERATED, REQ_SUBMITTED, EXECUTE_NODES, REQ_COMPLETE };
|
||||
|
||||
enum Child_state { DONE, INIT_BLOCK, INIT_NODE, WRITE_BLOCK, };
|
||||
enum Node_state { DONE, INIT_BLOCK, INIT_NODE, WRITE_BLK };
|
||||
|
||||
struct Type_1_level
|
||||
{
|
||||
Type_1_node_block children { };
|
||||
Child_state children_state[NR_OF_T1_NODES_PER_BLK] { DONE };
|
||||
};
|
||||
State _state { REQ_COMPLETE };
|
||||
Request *_req_ptr { };
|
||||
Type_2_node_block _t2_blk { };
|
||||
Type_1_node_block_walk _t1_blks { };
|
||||
Node_state _t1_node_states[TREE_MAX_NR_OF_LEVELS][NUM_NODES_PER_BLK] { };
|
||||
Node_state _t2_node_states[NUM_NODES_PER_BLK] { };
|
||||
Number_of_leaves _num_remaining_leaves { 0 };
|
||||
bool _generated_req_success { false };
|
||||
Block _blk { };
|
||||
|
||||
struct Type_2_level
|
||||
{
|
||||
Type_2_node_block children { };
|
||||
Child_state children_state[NR_OF_T2_NODES_PER_BLK] { DONE };
|
||||
};
|
||||
NONCOPYABLE(Ft_initializer_channel);
|
||||
|
||||
struct Root_node
|
||||
{
|
||||
Type_1_node node { };
|
||||
Child_state state { DONE };
|
||||
};
|
||||
void _reset_level(Tree_level_index, Node_state);
|
||||
|
||||
State _state { INACTIVE };
|
||||
Ft_initializer_request _request { };
|
||||
Root_node _root_node { };
|
||||
Type_1_level _t1_levels[TREE_MAX_LEVEL] { };
|
||||
Type_2_level _t2_level { };
|
||||
uint64_t _level_to_write { 0 };
|
||||
uint64_t _blk_nr { 0 };
|
||||
uint64_t _child_pba { 0 };
|
||||
bool _generated_req_success { false };
|
||||
Block _encoded_blk { };
|
||||
void _generated_req_completed(State_uint) override;
|
||||
|
||||
static void reset_node(Tresor::Type_1_node &node)
|
||||
{
|
||||
memset(&node, 0, sizeof(Type_1_node));
|
||||
}
|
||||
bool _request_complete() override { return _state == REQ_COMPLETE; }
|
||||
|
||||
static void reset_node(Tresor::Type_2_node &node)
|
||||
{
|
||||
memset(&node, 0, sizeof(Type_2_node));
|
||||
}
|
||||
void _request_submitted(Module_request &) override;
|
||||
|
||||
static void reset_level(Type_1_level &level,
|
||||
Child_state state)
|
||||
{
|
||||
for (unsigned int i = 0; i < NR_OF_T1_NODES_PER_BLK; i++) {
|
||||
reset_node(level.children.nodes[i]);
|
||||
level.children_state[i] = state;
|
||||
}
|
||||
}
|
||||
bool _execute_t2_node(Tree_node_index, bool &);
|
||||
|
||||
static void reset_level(Type_2_level &level,
|
||||
Child_state state)
|
||||
{
|
||||
for (unsigned int i = 0; i < NR_OF_T2_NODES_PER_BLK; i++) {
|
||||
reset_node(level.children.nodes[i]);
|
||||
level.children_state[i] = state;
|
||||
}
|
||||
}
|
||||
bool _execute_t1_node(Tree_level_index, Tree_node_index, bool &);
|
||||
|
||||
static void dump(Type_1_node_block const &node_block)
|
||||
{
|
||||
for (auto v : node_block.nodes) {
|
||||
if (v.pba != 0)
|
||||
log(v);
|
||||
}
|
||||
}
|
||||
void _mark_req_failed(bool &, char const *);
|
||||
|
||||
static void dump(Type_2_node_block const &node_block)
|
||||
{
|
||||
for (auto v : node_block.nodes) {
|
||||
if (v.pba != 0)
|
||||
log(v);
|
||||
}
|
||||
}
|
||||
void _mark_req_successful(bool &);
|
||||
|
||||
public:
|
||||
|
||||
Ft_initializer_channel(Module_channel_id id) : Module_channel { FT_INITIALIZER, id } { }
|
||||
|
||||
void execute(bool &);
|
||||
};
|
||||
|
||||
|
||||
@@ -177,83 +96,16 @@ class Tresor::Ft_initializer : public Module
|
||||
{
|
||||
private:
|
||||
|
||||
using Request = Ft_initializer_request;
|
||||
using Channel = Ft_initializer_channel;
|
||||
|
||||
enum { NR_OF_CHANNELS = 1 };
|
||||
|
||||
Channel _channels[NR_OF_CHANNELS] { };
|
||||
|
||||
void _execute_leaf_child(Channel &channel,
|
||||
bool &progress,
|
||||
uint64_t &nr_of_leaves,
|
||||
Tresor::Type_2_node &child,
|
||||
Ft_initializer_channel::Child_state &child_state,
|
||||
uint64_t child_index);
|
||||
|
||||
void _execute_inner_t2_child(Channel &channel,
|
||||
bool &progress,
|
||||
uint64_t nr_of_leaves,
|
||||
uint64_t &level_to_write,
|
||||
Tresor::Type_1_node &child,
|
||||
Ft_initializer_channel::Type_2_level &child_level,
|
||||
Ft_initializer_channel::Child_state &child_state,
|
||||
uint64_t level_index,
|
||||
uint64_t child_index);
|
||||
|
||||
void _execute_inner_t1_child(Channel &channel,
|
||||
bool &progress,
|
||||
uint64_t nr_of_leaves,
|
||||
uint64_t &level_to_write,
|
||||
Tresor::Type_1_node &child,
|
||||
Ft_initializer_channel::Type_1_level &child_level,
|
||||
Ft_initializer_channel::Child_state &child_state,
|
||||
uint64_t level_index,
|
||||
uint64_t child_index);
|
||||
|
||||
void _execute(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
void _execute_init(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
void _mark_req_failed(Channel &channel,
|
||||
bool &progress,
|
||||
char const *str);
|
||||
|
||||
void _mark_req_successful(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
|
||||
/************
|
||||
** Module **
|
||||
************/
|
||||
|
||||
bool _peek_completed_request(uint8_t *buf_ptr,
|
||||
size_t buf_size) override;
|
||||
|
||||
void _drop_completed_request(Module_request &req) override;
|
||||
|
||||
bool _peek_generated_request(uint8_t *buf_ptr,
|
||||
size_t buf_size) override;
|
||||
|
||||
void _drop_generated_request(Module_request &mod_req) override;
|
||||
|
||||
void generated_request_complete(Module_request &req) override;
|
||||
Constructible<Channel> _channels[1] { };
|
||||
|
||||
NONCOPYABLE(Ft_initializer);
|
||||
|
||||
public:
|
||||
|
||||
Ft_initializer();
|
||||
|
||||
/************
|
||||
** Module **
|
||||
************/
|
||||
|
||||
bool ready_to_submit_request() override;
|
||||
|
||||
void submit_request(Module_request &req) override;
|
||||
|
||||
void execute(bool &) override;
|
||||
|
||||
};
|
||||
|
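Illustration only (not part of the patch): with the reworked framework the client constructs the request itself and keeps it alive while the module works on it, handing it over via try_submit_request(). A minimal sketch, assuming a Tree_root ft, a Pba_allocator pba_alloc and an Ft_initializer instance ft_initializer exist at the caller:

bool success { false };
Ft_initializer_request request { COMMAND_POOL, 0, ft, pba_alloc, success };

if (!ft_initializer.try_submit_request(request)) {
	/* no free channel yet; retry on a later execution round */
}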
@@ -1,257 +0,0 @@
|
||||
/*
|
||||
* \brief Module for file tree resizing
|
||||
* \author Martin Stein
|
||||
* \date 2023-03-09
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2023 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _TRESOR__FT_RESIZING_H_
|
||||
#define _TRESOR__FT_RESIZING_H_
|
||||
|
||||
/* tresor includes */
|
||||
#include <tresor/module.h>
|
||||
#include <tresor/types.h>
|
||||
#include <tresor/vfs_utilities.h>
|
||||
|
||||
namespace Tresor {
|
||||
|
||||
class Ft_resizing;
|
||||
class Ft_resizing_request;
|
||||
class Ft_resizing_channel;
|
||||
}
|
||||
|
||||
|
||||
class Tresor::Ft_resizing_request : public Module_request
|
||||
{
|
||||
public:
|
||||
|
||||
enum Type { INVALID = 0, FT_EXTENSION_STEP = 1 };
|
||||
|
||||
private:
|
||||
|
||||
friend class Ft_resizing;
|
||||
friend class Ft_resizing_channel;
|
||||
|
||||
Type _type { INVALID };
|
||||
Generation _curr_gen { INVALID_GENERATION };
|
||||
Type_1_node _ft_root { };
|
||||
Tree_level_index _ft_max_lvl { 0 };
|
||||
Number_of_leaves _ft_nr_of_leaves { 0 };
|
||||
Tree_degree _ft_degree { TREE_MIN_DEGREE };
|
||||
addr_t _mt_root_pba_ptr { 0 };
|
||||
addr_t _mt_root_gen_ptr { 0 };
|
||||
addr_t _mt_root_hash_ptr { 0 };
|
||||
Tree_level_index _mt_max_level { 0 };
|
||||
Tree_degree _mt_degree { 0 };
|
||||
Number_of_leaves _mt_leaves { 0 };
|
||||
Physical_block_address _pba { 0 };
|
||||
Number_of_blocks _nr_of_pbas { 0 };
|
||||
Number_of_leaves _nr_of_leaves { 0 };
|
||||
bool _success { false };
|
||||
|
||||
public:
|
||||
|
||||
Ft_resizing_request() { }
|
||||
|
||||
Ft_resizing_request(uint64_t src_module_id,
|
||||
uint64_t src_request_id,
|
||||
Type type,
|
||||
Generation curr_gen,
|
||||
Type_1_node ft_root,
|
||||
Tree_level_index ft_max_lvl,
|
||||
Number_of_leaves ft_nr_of_leaves,
|
||||
Tree_degree ft_degree,
|
||||
addr_t mt_root_pba_ptr,
|
||||
addr_t mt_root_gen_ptr,
|
||||
addr_t mt_root_hash_ptr,
|
||||
Tree_level_index mt_max_level,
|
||||
Tree_degree mt_degree,
|
||||
Number_of_leaves mt_leaves,
|
||||
Physical_block_address pba,
|
||||
Number_of_blocks nr_of_pbas);
|
||||
|
||||
Type type() const { return _type; }
|
||||
|
||||
bool success() const { return _success; }
|
||||
|
||||
static char const *type_to_string(Type type);
|
||||
|
||||
Type_1_node ft_root() const { return _ft_root; }
|
||||
Tree_level_index ft_max_lvl() const { return _ft_max_lvl; }
|
||||
Number_of_leaves ft_nr_of_leaves() const { return _ft_nr_of_leaves; }
|
||||
Number_of_leaves nr_of_leaves() const { return _nr_of_leaves; }
|
||||
Physical_block_address pba() const { return _pba; }
|
||||
Number_of_blocks nr_of_pbas() const { return _nr_of_pbas; }
|
||||
|
||||
|
||||
/********************
|
||||
** Module_request **
|
||||
********************/
|
||||
|
||||
void print(Output &out) const override
|
||||
{
|
||||
Genode::print(out, type_to_string(_type), " root ", _ft_root, " leaves ", _ft_nr_of_leaves, " max_lvl ", _ft_max_lvl);
|
||||
}
|
||||
};
|
||||
|
||||
class Tresor::Ft_resizing_channel
|
||||
{
|
||||
private:
|
||||
|
||||
friend class Ft_resizing;
|
||||
|
||||
enum State {
|
||||
SUBMITTED,
|
||||
|
||||
READ_ROOT_NODE_PENDING,
|
||||
READ_ROOT_NODE_IN_PROGRESS,
|
||||
READ_ROOT_NODE_COMPLETED,
|
||||
|
||||
READ_INNER_NODE_PENDING,
|
||||
READ_INNER_NODE_IN_PROGRESS,
|
||||
READ_INNER_NODE_COMPLETED,
|
||||
|
||||
ALLOC_PBA_PENDING,
|
||||
ALLOC_PBA_IN_PROGRESS,
|
||||
ALLOC_PBA_COMPLETED,
|
||||
|
||||
EXTEND_MT_BY_ONE_LEAF_PENDING,
|
||||
EXTEND_MT_BY_ONE_LEAF_IN_PROGRESS,
|
||||
EXTEND_MT_BY_ONE_LEAF_COMPLETED,
|
||||
|
||||
WRITE_INNER_NODE_PENDING,
|
||||
WRITE_INNER_NODE_IN_PROGRESS,
|
||||
WRITE_INNER_NODE_COMPLETED,
|
||||
|
||||
WRITE_ROOT_NODE_PENDING,
|
||||
WRITE_ROOT_NODE_IN_PROGRESS,
|
||||
WRITE_ROOT_NODE_COMPLETED,
|
||||
|
||||
COMPLETED
|
||||
};
|
||||
|
||||
enum Tag_type
|
||||
{
|
||||
TAG_INVALID,
|
||||
TAG_FT_RSZG_CACHE,
|
||||
TAG_FT_RSZG_MT_ALLOC,
|
||||
};
|
||||
|
||||
struct Generated_prim
|
||||
{
|
||||
enum Type { READ, WRITE };
|
||||
|
||||
Type op { READ };
|
||||
bool succ { false };
|
||||
Tag_type tg { TAG_INVALID };
|
||||
uint64_t blk_nr { 0 };
|
||||
uint64_t idx { 0 };
|
||||
};
|
||||
|
||||
struct Type_1_node_blocks
|
||||
{
|
||||
Type_1_node_block items[TREE_MAX_LEVEL] { };
|
||||
};
|
||||
|
||||
struct Generations
|
||||
{
|
||||
Generation items[TREE_MAX_LEVEL + 1] { };
|
||||
};
|
||||
|
||||
Ft_resizing_request _request { };
|
||||
State _state { SUBMITTED };
|
||||
Generated_prim _generated_prim { };
|
||||
Type_1_node_blocks _t1_blks { };
|
||||
Type_2_node_block _t2_blk { };
|
||||
Tree_level_index _lvl_idx { 0 };
|
||||
Tree_level_index _alloc_lvl_idx { 0 };
|
||||
Virtual_block_address _vba { };
|
||||
Tree_walk_pbas _old_pbas { };
|
||||
Generations _old_generations { };
|
||||
Tree_walk_pbas _new_pbas { };
|
||||
Block _encoded_blk { };
|
||||
};
|
||||
|
||||
class Tresor::Ft_resizing : public Module
|
||||
{
|
||||
private:
|
||||
|
||||
using Request = Ft_resizing_request;
|
||||
using Channel = Ft_resizing_channel;
|
||||
|
||||
enum { NR_OF_CHANNELS = 1 };
|
||||
|
||||
Channel _channels[NR_OF_CHANNELS] { };
|
||||
|
||||
void _set_args_for_write_back_of_inner_lvl(Tree_level_index const,
|
||||
Tree_level_index const,
|
||||
Physical_block_address const,
|
||||
unsigned const prim_idx,
|
||||
Channel::State &,
|
||||
bool &progress,
|
||||
Channel::Generated_prim &);
|
||||
|
||||
void _add_new_root_lvl_to_ft_using_pba_contingent(Type_1_node &,
|
||||
Tree_level_index &,
|
||||
Number_of_leaves const,
|
||||
Generation const,
|
||||
Channel::Type_1_node_blocks &,
|
||||
Tree_walk_pbas &,
|
||||
Physical_block_address &,
|
||||
Number_of_blocks &);
|
||||
|
||||
void _add_new_branch_to_ft_using_pba_contingent(Tree_level_index const,
|
||||
Tree_node_index const,
|
||||
Tree_degree const,
|
||||
Generation const,
|
||||
Physical_block_address &,
|
||||
Number_of_blocks &,
|
||||
Channel::Type_1_node_blocks &,
|
||||
Type_2_node_block &,
|
||||
Tree_walk_pbas &,
|
||||
Tree_level_index &,
|
||||
Number_of_leaves &);
|
||||
|
||||
void _execute_ft_extension_step(Channel &, unsigned const idx, bool &);
|
||||
|
||||
void _execute_ft_ext_step_read_inner_node_completed(Channel &,
|
||||
unsigned const job_idx,
|
||||
bool &progress);
|
||||
|
||||
/************
|
||||
** Module **
|
||||
************/
|
||||
|
||||
bool _peek_completed_request(uint8_t *buf_ptr,
|
||||
size_t buf_size) override;
|
||||
|
||||
void _drop_completed_request(Module_request &req) override;
|
||||
|
||||
bool _peek_generated_request(uint8_t *buf_ptr,
|
||||
size_t buf_size) override;
|
||||
|
||||
void _drop_generated_request(Module_request &mod_req) override;
|
||||
|
||||
void generated_request_complete(Module_request &req) override;
|
||||
|
||||
|
||||
public:
|
||||
|
||||
/************
|
||||
** Module **
|
||||
************/
|
||||
|
||||
bool ready_to_submit_request() override;
|
||||
|
||||
void submit_request(Module_request &req) override;
|
||||
|
||||
void execute(bool &) override;
|
||||
};
|
||||
|
||||
#endif /* _TRESOR__FT_RESIZING_H_ */
|
repos/gems/src/lib/tresor/include/tresor/hash.h (new file, 29 lines)
@@ -0,0 +1,29 @@
/*
 * \brief  Calculate and check hashes of tresor data blocks
 * \author Martin Stein
 * \date   2023-02-13
 */

/*
 * Copyright (C) 2023 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU Affero General Public License version 3.
 */

#ifndef _TRESOR__HASH_H_
#define _TRESOR__HASH_H_

namespace Tresor {

	class Block;
	class Hash;

	void calc_hash(Block const &, Hash &);

	bool check_hash(Block const &, Hash const &);

	Hash hash(Block const &);
}

#endif /* _TRESOR__HASH_H_ */
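A short usage sketch of this interface (not from the patch; Block and Hash are the strict tresor types, presumably declared in tresor/types.h, and the surrounding code is assumed):

Block blk { };
Hash expected { };
/* ... read blk from the back end, take expected from the parent node ... */

if (!check_hash(blk, expected))
	/* treat the block as corrupted */;

Hash fresh { };
calc_hash(blk, fresh);   /* hash to be stored at the parent on write */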
@@ -14,15 +14,15 @@
|
||||
#ifndef _TRESOR__MATH_H_
|
||||
#define _TRESOR__MATH_H_
|
||||
|
||||
/* tresor includes */
|
||||
#include <tresor/assertion.h>
|
||||
|
||||
namespace Tresor {
|
||||
|
||||
template <typename T>
|
||||
constexpr T to_the_power_of(T base, T exponent)
|
||||
{
|
||||
if (exponent < 0) {
|
||||
class Negative_exponent { };
|
||||
throw Negative_exponent { };
|
||||
}
|
||||
ASSERT(exponent >= 0);
|
||||
if (exponent == 0)
|
||||
return 1;
|
||||
|
||||
@@ -32,6 +32,13 @@ namespace Tresor {
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
bool is_power_of_2(T val)
|
||||
{
|
||||
for (; val && (val & 1) == 0; val >>= 1);
|
||||
return val == 1;
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* _TRESOR__MATH_H_ */
|
||||
|
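The added is_power_of_2 helper shifts out trailing zero bits and then checks that exactly one set bit remains. For illustration (not part of the patch):

is_power_of_2(1U);   /* true:  already 0b1 */
is_power_of_2(64U);  /* true:  0b1000000 shifts down to 0b1 */
is_power_of_2(0U);   /* false: the loop stops with val == 0 */
is_power_of_2(12U);  /* false: 0b1100 shifts down to 0b11 */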
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* \brief Module for doing free tree COW allocations on the meta tree
|
||||
* \brief Module for doing VBD COW allocations on the meta tree
|
||||
* \author Martin Stein
|
||||
* \date 2023-02-13
|
||||
*/
|
||||
@@ -16,8 +16,6 @@
|
||||
|
||||
/* tresor includes */
|
||||
#include <tresor/types.h>
|
||||
#include <tresor/module.h>
|
||||
#include <tresor/sha256_4k_hash.h>
|
||||
|
||||
namespace Tresor {
|
||||
|
||||
@@ -28,226 +26,102 @@ namespace Tresor {
|
||||
|
||||
class Tresor::Meta_tree_request : public Module_request
|
||||
{
|
||||
friend class Meta_tree_channel;
|
||||
|
||||
public:
|
||||
|
||||
enum Type { INVALID = 0, UPDATE = 1 };
|
||||
enum Type { ALLOC_PBA };
|
||||
|
||||
private:
|
||||
|
||||
friend class Meta_tree;
|
||||
friend class Meta_tree_channel;
|
||||
Type const _type;
|
||||
Tree_root &_mt;
|
||||
Generation const _curr_gen;
|
||||
Physical_block_address &_pba;
|
||||
bool &_success;
|
||||
|
||||
Type _type { INVALID };
|
||||
addr_t _mt_root_pba_ptr { 0 };
|
||||
addr_t _mt_root_gen_ptr { 0 };
|
||||
addr_t _mt_root_hash_ptr { 0 };
|
||||
uint64_t _mt_max_lvl { 0 };
|
||||
uint64_t _mt_edges { 0 };
|
||||
uint64_t _mt_leaves { 0 };
|
||||
uint64_t _current_gen { 0 };
|
||||
uint64_t _old_pba { INVALID_PBA };
|
||||
uint64_t _new_pba { INVALID_PBA };
|
||||
bool _success { false };
|
||||
NONCOPYABLE(Meta_tree_request);
|
||||
|
||||
public:
|
||||
|
||||
Meta_tree_request() { }
|
||||
Meta_tree_request(Module_id, Module_channel_id, Type, Tree_root &, Generation, Physical_block_address &, bool &);
|
||||
|
||||
Meta_tree_request(Module_id src_module_id,
|
||||
Module_request_id src_request_id);
|
||||
|
||||
static void create(void *buf_ptr,
|
||||
size_t buf_size,
|
||||
uint64_t src_module_id,
|
||||
uint64_t src_request_id,
|
||||
size_t req_type,
|
||||
void *mt_root_pba_ptr,
|
||||
void *mt_root_gen_ptr,
|
||||
void *mt_root_hash_ptr,
|
||||
uint64_t mt_max_lvl,
|
||||
uint64_t mt_edges,
|
||||
uint64_t mt_leaves,
|
||||
uint64_t curr_gen,
|
||||
uint64_t old_pba);
|
||||
|
||||
uint64_t new_pba() { return _new_pba; }
|
||||
|
||||
Type type() const { return _type; }
|
||||
|
||||
bool success() const { return _success; }
|
||||
|
||||
static char const *type_to_string(Type type);
|
||||
|
||||
char const *type_name() const { return type_to_string(_type); }
|
||||
|
||||
|
||||
/********************
|
||||
** Module_request **
|
||||
********************/
|
||||
static char const *type_to_string(Type);
|
||||
|
||||
void print(Output &out) const override { Genode::print(out, type_to_string(_type)); }
|
||||
};
|
||||
|
||||
|
||||
class Tresor::Meta_tree_channel
|
||||
class Tresor::Meta_tree_channel : public Module_channel
|
||||
{
|
||||
private:
|
||||
|
||||
friend class Meta_tree;
|
||||
using Request = Meta_tree_request;
|
||||
|
||||
enum State {
|
||||
INVALID,
|
||||
UPDATE,
|
||||
COMPLETE,
|
||||
TREE_HASH_MISMATCH
|
||||
};
|
||||
enum State { REQ_SUBMITTED, REQ_GENERATED, SEEK_DOWN, SEEK_LEFT_OR_UP, WRITE_BLK, COMPLETE };
|
||||
|
||||
struct Type_1_info
|
||||
State _state { COMPLETE };
|
||||
Request *_req_ptr { nullptr };
|
||||
Block _blk { };
|
||||
Tree_node_index _node_idx[TREE_MAX_NR_OF_LEVELS] { };
|
||||
Type_1_node_block _t1_blks[TREE_MAX_NR_OF_LEVELS] { };
|
||||
Type_2_node_block _t2_blk { };
|
||||
Tree_level_index _lvl { 0 };
|
||||
bool _generated_req_success { false };
|
||||
|
||||
NONCOPYABLE(Meta_tree_channel);
|
||||
|
||||
void _generated_req_completed(State_uint) override;
|
||||
|
||||
template <typename REQUEST, typename... ARGS>
|
||||
void _generate_req(State_uint state, bool &progress, ARGS &&... args)
|
||||
{
|
||||
enum State {
|
||||
INVALID, READ, READ_COMPLETE, WRITE, WRITE_COMPLETE, COMPLETE };
|
||||
_state = REQ_GENERATED;
|
||||
generate_req<REQUEST>(state, progress, args..., _generated_req_success);
|
||||
}
|
||||
|
||||
State state { INVALID };
|
||||
Type_1_node node { };
|
||||
Type_1_node_block entries { };
|
||||
uint8_t index { INVALID_NODE_INDEX };
|
||||
bool dirty { false };
|
||||
bool volatil { false };
|
||||
};
|
||||
void _request_submitted(Module_request &) override;
|
||||
|
||||
struct Type_2_info
|
||||
{
|
||||
enum State {
|
||||
INVALID, READ, READ_COMPLETE, WRITE, WRITE_COMPLETE, COMPLETE };
|
||||
bool _request_complete() override { return _state == COMPLETE; }
|
||||
|
||||
State state { INVALID };
|
||||
Type_1_node node { };
|
||||
Type_2_node_block entries { };
|
||||
uint8_t index { INVALID_NODE_INDEX };
|
||||
bool volatil { false };
|
||||
};
|
||||
void _mark_req_failed(bool &, char const *);
|
||||
|
||||
struct Local_cache_request
|
||||
{
|
||||
enum State { INVALID, PENDING, IN_PROGRESS };
|
||||
enum Op { READ, WRITE, SYNC };
|
||||
bool _can_alloc_pba_of(Type_2_node &);
|
||||
|
||||
State state { INVALID };
|
||||
Op op { READ };
|
||||
bool success { false };
|
||||
uint64_t pba { 0 };
|
||||
uint64_t level { 0 };
|
||||
Block block_data { };
|
||||
void _alloc_pba_of(Type_2_node &, Physical_block_address &);
|
||||
|
||||
Local_cache_request(State state,
|
||||
Op op,
|
||||
bool success,
|
||||
uint64_t pba,
|
||||
uint64_t level,
|
||||
Block const *blk_ptr)
|
||||
:
|
||||
state { state },
|
||||
op { op },
|
||||
success { success },
|
||||
pba { pba },
|
||||
level { level }
|
||||
{
|
||||
if (blk_ptr != nullptr) {
|
||||
block_data = *blk_ptr;
|
||||
}
|
||||
}
|
||||
void _traverse_curr_node(bool &);
|
||||
|
||||
Local_cache_request() { }
|
||||
};
|
||||
void _mark_req_successful(bool &);
|
||||
|
||||
State _state { INVALID };
|
||||
Meta_tree_request _request { };
|
||||
Local_cache_request _cache_request { };
|
||||
Type_2_info _level_1_node { };
|
||||
Type_1_info _level_n_nodes[TREE_MAX_NR_OF_LEVELS] { }; /* index starts at 2 */
|
||||
bool _finished { false };
|
||||
bool _root_dirty { false };
|
||||
void _start_tree_traversal(bool &);
|
||||
|
||||
public:
|
||||
|
||||
Meta_tree_channel(Module_channel_id id) : Module_channel { META_TREE, id } { }
|
||||
|
||||
void execute(bool &);
|
||||
};
|
||||
|
||||
class Tresor::Meta_tree : public Module
|
||||
{
|
||||
private:
|
||||
|
||||
using Request = Meta_tree_request;
|
||||
using Channel = Meta_tree_channel;
|
||||
using Local_cache_request = Channel::Local_cache_request;
|
||||
using Type_1_info = Channel::Type_1_info;
|
||||
using Type_2_info = Channel::Type_2_info;
|
||||
|
||||
enum { NR_OF_CHANNELS = 1 };
|
||||
Constructible<Channel> _channels[1] { };
|
||||
|
||||
Channel _channels[NR_OF_CHANNELS] { };
|
||||
|
||||
void _handle_level_n_nodes(Channel &channel,
|
||||
bool &handled);
|
||||
|
||||
void _handle_level_1_node(Channel &channel,
|
||||
bool &handled);
|
||||
|
||||
void _exchange_request_pba(Channel &channel,
|
||||
Type_2_node &t2_entry);
|
||||
|
||||
void _exchange_nv_inner_nodes(Channel &channel,
|
||||
Type_2_node &t2_entry,
|
||||
bool &exchanged);
|
||||
|
||||
void _exchange_nv_level_1_node(Channel &channel,
|
||||
Type_2_node &t2_entry,
|
||||
bool &exchanged);
|
||||
|
||||
bool _node_volatile(Type_1_node const &node,
|
||||
uint64_t gen);
|
||||
|
||||
void _handle_level_0_nodes(Channel &channel,
|
||||
bool &handled);
|
||||
|
||||
void _update_parent(Type_1_node &node,
|
||||
Block const &blk,
|
||||
uint64_t gen,
|
||||
uint64_t pba);
|
||||
|
||||
void _handle_level_0_nodes(bool &handled);
|
||||
|
||||
void _execute_update(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
void _mark_req_failed(Channel &channel,
|
||||
bool &progress,
|
||||
char const *str);
|
||||
|
||||
void _mark_req_successful(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
|
||||
/************
|
||||
** Module **
|
||||
************/
|
||||
|
||||
bool ready_to_submit_request() override;
|
||||
|
||||
void submit_request(Module_request &req) override;
|
||||
|
||||
bool _peek_completed_request(uint8_t *buf_ptr,
|
||||
size_t buf_size) override;
|
||||
|
||||
void _drop_completed_request(Module_request &req) override;
|
||||
NONCOPYABLE(Meta_tree);
|
||||
|
||||
void execute(bool &) override;
|
||||
|
||||
bool _peek_generated_request(uint8_t *buf_ptr,
|
||||
size_t buf_size) override;
|
||||
|
||||
void _drop_generated_request(Module_request &mod_req) override;
|
||||
|
||||
void generated_request_complete(Module_request &req) override;
|
||||
|
||||
public:
|
||||
|
||||
struct Alloc_pba : Meta_tree_request
|
||||
{
|
||||
Alloc_pba(Module_id src_mod, Module_channel_id src_chan, Tree_root &mt, Generation gen, Physical_block_address &pba, bool &succ)
|
||||
: Meta_tree_request(src_mod, src_chan, Meta_tree_request::ALLOC_PBA, mt, gen, pba, succ) { }
|
||||
};
|
||||
|
||||
Meta_tree();
|
||||
};
|
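Illustration (not part of the patch): a channel of another module can now allocate a free meta-tree PBA inline by generating the Alloc_pba convenience request shown above. Example_channel, its ALLOC_PBA_OK state, and the members mt, gen and pba are placeholders of the calling side:

void Example_channel::_alloc_meta_pba(bool &progress)
{
	_state = REQ_GENERATED;
	generate_req<Meta_tree::Alloc_pba>(
		ALLOC_PBA_OK, progress, mt, gen, pba, _generated_req_success);
}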
||||
|
||||
|
@@ -15,52 +15,31 @@
|
||||
#define _TRESOR__MODULE_H_
|
||||
|
||||
/* base includes */
|
||||
#include <util/string.h>
|
||||
#include <base/log.h>
|
||||
#include <util/avl_tree.h>
|
||||
|
||||
/* tresor includes */
|
||||
#include <tresor/verbosity.h>
|
||||
#include <tresor/construct_in_buf.h>
|
||||
#include <tresor/noncopyable.h>
|
||||
#include <tresor/assertion.h>
|
||||
|
||||
namespace Tresor {
|
||||
|
||||
using namespace Genode;
|
||||
|
||||
using Module_id = uint64_t;
|
||||
using Module_request_id = uint64_t;
|
||||
using Module_channel_id = uint64_t;
|
||||
|
||||
enum {
|
||||
INVALID_MODULE_ID = ~0UL,
|
||||
INVALID_MODULE_REQUEST_ID = ~0UL,
|
||||
};
|
||||
enum { INVALID_MODULE_ID = ~(Module_id)0, INVALID_MODULE_CHANNEL_ID = ~(Module_channel_id)0 };
|
||||
|
||||
enum Module_id_enum : Module_id
|
||||
{
|
||||
CRYPTO = 0,
|
||||
CLIENT_DATA = 1,
|
||||
TRUST_ANCHOR = 2,
|
||||
COMMAND_POOL = 3,
|
||||
BLOCK_IO = 4,
|
||||
CACHE = 5,
|
||||
META_TREE = 6,
|
||||
FREE_TREE = 7,
|
||||
VIRTUAL_BLOCK_DEVICE = 8,
|
||||
SUPERBLOCK_CONTROL = 9,
|
||||
BLOCK_ALLOCATOR = 10,
|
||||
VBD_INITIALIZER = 11,
|
||||
FT_INITIALIZER = 12,
|
||||
SB_INITIALIZER = 13,
|
||||
REQUEST_POOL = 14,
|
||||
SB_CHECK = 15,
|
||||
VBD_CHECK = 16,
|
||||
FT_CHECK = 17,
|
||||
FT_RESIZING = 18,
|
||||
MAX_MODULE_ID = 18,
|
||||
};
|
||||
enum Module_id_enum : Module_id {
|
||||
CRYPTO = 0, CLIENT_DATA = 1, TRUST_ANCHOR = 2, COMMAND_POOL = 3, BLOCK_IO = 4, CACHE = 5, META_TREE = 6,
|
||||
FREE_TREE = 7, VIRTUAL_BLOCK_DEVICE = 8, SUPERBLOCK_CONTROL = 9, VBD_INITIALIZER = 10, FT_INITIALIZER = 11,
|
||||
SB_INITIALIZER = 12, REQUEST_POOL = 13, SB_CHECK = 14, VBD_CHECK = 15, FT_CHECK = 16, SPLITTER = 17, MAX_MODULE_ID = 17 };
|
||||
|
||||
char const *module_name(Module_id module_id);
|
||||
|
||||
class Module_request;
|
||||
class Module_channel;
|
||||
class Module;
|
||||
class Module_composition;
|
||||
}
|
||||
@@ -70,38 +49,96 @@ class Tresor::Module_request : public Interface
|
||||
{
|
||||
private:
|
||||
|
||||
Module_id _src_module_id { INVALID_MODULE_ID };
|
||||
Module_request_id _src_request_id { INVALID_MODULE_REQUEST_ID };
|
||||
Module_id _dst_module_id { INVALID_MODULE_ID };
|
||||
Module_request_id _dst_request_id { INVALID_MODULE_REQUEST_ID };
|
||||
Module_id _src_module_id;
|
||||
Module_channel_id _src_chan_id;
|
||||
Module_id _dst_module_id;
|
||||
Module_channel_id _dst_chan_id { INVALID_MODULE_CHANNEL_ID };
|
||||
|
||||
NONCOPYABLE(Module_request);
|
||||
|
||||
public:
|
||||
|
||||
Module_request() { }
|
||||
Module_request(Module_id, Module_channel_id, Module_id);
|
||||
|
||||
Module_request(Module_id src_module_id,
|
||||
Module_request_id src_request_id,
|
||||
Module_id dst_module_id);
|
||||
void dst_chan_id(Module_channel_id id) { _dst_chan_id = id; }
|
||||
|
||||
void dst_request_id(Module_request_id id) { _dst_request_id = id; }
|
||||
|
||||
String<32> src_request_id_str() const;
|
||||
|
||||
String<32> dst_request_id_str() const;
|
||||
Module_id src_module_id() const { return _src_module_id; }
|
||||
Module_channel_id src_chan_id() const { return _src_chan_id; }
|
||||
Module_id dst_module_id() const { return _dst_module_id; }
|
||||
Module_channel_id dst_chan_id() const { return _dst_chan_id; }
|
||||
|
||||
virtual void print(Output &) const = 0;
|
||||
|
||||
virtual ~Module_request() { }
|
||||
};
|
||||
|
||||
|
||||
/***************
|
||||
** Accessors **
|
||||
***************/
|
||||
class Tresor::Module_channel : private Avl_node<Module_channel>
|
||||
{
|
||||
friend class Module;
|
||||
friend class Avl_node<Module_channel>;
|
||||
friend class Avl_tree<Module_channel>;
|
||||
|
||||
Module_id src_module_id() const { return _src_module_id; }
|
||||
Module_request_id src_request_id() const { return _src_request_id; }
|
||||
Module_id dst_module_id() const { return _dst_module_id; }
|
||||
Module_request_id dst_request_id() const { return _dst_request_id; }
|
||||
public:
|
||||
|
||||
using State_uint = uint64_t;
|
||||
|
||||
private:
|
||||
|
||||
enum { GEN_REQ_BUF_SIZE = 4000 };
|
||||
|
||||
enum Generated_request_state { NONE, PENDING, IN_PROGRESS };
|
||||
|
||||
Module_request *_req_ptr { nullptr };
|
||||
Module_id _module_id;
|
||||
Module_channel_id _id;
|
||||
Generated_request_state _gen_req_state { NONE };
|
||||
uint8_t _gen_req_buf[GEN_REQ_BUF_SIZE] { };
|
||||
State_uint _gen_req_complete_state { 0 };
|
||||
|
||||
NONCOPYABLE(Module_channel);
|
||||
|
||||
bool higher(Module_channel *ptr) { return ptr->_id > _id; }
|
||||
|
||||
virtual void _generated_req_completed(State_uint) { ASSERT_NEVER_REACHED; }
|
||||
|
||||
virtual void _request_submitted(Module_request &) { ASSERT_NEVER_REACHED; }
|
||||
|
||||
virtual bool _request_complete() { ASSERT_NEVER_REACHED; }
|
||||
|
||||
public:
|
||||
|
||||
Module_channel(Module_id module_id, Module_channel_id id) : _module_id { module_id }, _id { id } { };
|
||||
|
||||
template <typename REQUEST, typename... ARGS>
|
||||
void generate_req(State_uint complete_state, bool &progress, ARGS &&... args)
|
||||
{
|
||||
ASSERT(_gen_req_state == NONE);
|
||||
static_assert(sizeof(REQUEST) <= GEN_REQ_BUF_SIZE);
|
||||
construct_at<REQUEST>(_gen_req_buf, _module_id, _id, args...);
|
||||
_gen_req_state = PENDING;
|
||||
_gen_req_complete_state = complete_state;
|
||||
progress = true;
|
||||
}
|
||||
|
||||
template <typename CHAN, typename FUNC>
|
||||
void with_channel(Module_channel_id id, FUNC && func)
|
||||
{
|
||||
if (id != _id) {
|
||||
Module_channel *chan_ptr { Avl_node<Module_channel>::child(id > _id) };
|
||||
ASSERT(chan_ptr);
|
||||
chan_ptr->with_channel<CHAN>(id, func);
|
||||
} else
|
||||
func(*static_cast<CHAN *>(this));
|
||||
}
|
||||
|
||||
void generated_req_completed();
|
||||
|
||||
bool try_submit_request(Module_request &);
|
||||
|
||||
Module_channel_id id() const { return _id; }
|
||||
|
||||
virtual ~Module_channel() { }
|
||||
};
|
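A plausible shape of a channel-side completion hook (sketch with a hypothetical Example_channel; the real channels above each implement _generated_req_completed themselves): on success the channel resumes in the state that was recorded when the request was generated.

void Example_channel::_generated_req_completed(State_uint state_uint)
{
	if (!_generated_req_success) {
		_state = REQ_COMPLETE;       /* give up and let the own request fail */
		return;
	}
	_state = (State)state_uint;      /* resume at the state passed to generate_req */
}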
||||
|
||||
|
||||
@@ -109,86 +146,62 @@ class Tresor::Module : public Interface
|
||||
{
|
||||
private:
|
||||
|
||||
virtual bool _peek_completed_request(uint8_t *,
|
||||
size_t )
|
||||
{
|
||||
return false;
|
||||
}
|
||||
Avl_tree<Module_channel> _channels { };
|
||||
|
||||
virtual void _drop_completed_request(Module_request &)
|
||||
{
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
|
||||
virtual bool _peek_generated_request(uint8_t *,
|
||||
size_t )
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
virtual void _drop_generated_request(Module_request &)
|
||||
{
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
NONCOPYABLE(Module);
|
||||
|
||||
public:
|
||||
|
||||
enum Handle_request_result { REQUEST_HANDLED, REQUEST_NOT_HANDLED };
|
||||
|
||||
typedef Handle_request_result (
|
||||
*Handle_request_function)(Module_request &req);
|
||||
|
||||
virtual bool ready_to_submit_request() { return false; };
|
||||
|
||||
virtual void submit_request(Module_request &)
|
||||
template <typename CHAN = Module_channel, typename FUNC>
|
||||
void with_channel(Module_channel_id id, FUNC && func)
|
||||
{
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
ASSERT(_channels.first());
|
||||
_channels.first()->with_channel<CHAN>(id, func);
|
||||
}
|
||||
|
||||
virtual void execute(bool &) { }
|
||||
template <typename CHAN = Module_channel, typename FUNC>
|
||||
void for_each_channel(FUNC && func)
|
||||
{
|
||||
_channels.for_each([&] (Module_channel const &const_chan) {
|
||||
func(*static_cast<CHAN *>(const_cast<Module_channel *>(&const_chan))); });
|
||||
}
|
||||
|
||||
template <typename FUNC>
|
||||
void for_each_generated_request(FUNC && handle_request)
|
||||
{
|
||||
uint8_t buf[4000];
|
||||
while (_peek_generated_request(buf, sizeof(buf))) {
|
||||
|
||||
Module_request &req = *(Module_request *)buf;
|
||||
switch (handle_request(req)) {
|
||||
case Module::REQUEST_HANDLED:
|
||||
|
||||
_drop_generated_request(req);
|
||||
break;
|
||||
|
||||
case Module::REQUEST_NOT_HANDLED:
|
||||
for_each_channel([&] (Module_channel &chan) {
|
||||
if (chan._gen_req_state != Module_channel::PENDING)
|
||||
return;
|
||||
|
||||
Module_request &req = *(Module_request *)chan._gen_req_buf;
|
||||
if (handle_request(req)) {
|
||||
chan._gen_req_state = Module_channel::IN_PROGRESS;
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
virtual void generated_request_complete(Module_request &)
|
||||
{
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
});
|
||||
}
|
||||
|
||||
template <typename FUNC>
|
||||
void for_each_completed_request(FUNC && handle_request)
|
||||
{
|
||||
uint8_t buf[4000];
|
||||
while (_peek_completed_request(buf, sizeof(buf))) {
|
||||
|
||||
Module_request &req = *(Module_request *)buf;
|
||||
handle_request(req);
|
||||
_drop_completed_request(req);
|
||||
}
|
||||
for_each_channel([&] (Module_channel &chan) {
|
||||
if (chan._req_ptr && chan._request_complete()) {
|
||||
handle_request(*chan._req_ptr);
|
||||
chan._req_ptr = nullptr;
|
||||
}
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
bool try_submit_request(Module_request &);
|
||||
|
||||
void add_channel(Module_channel &chan) { _channels.insert(&chan); }
|
||||
|
||||
Module() { }
|
||||
|
||||
virtual ~Module() { }
|
||||
|
||||
virtual void execute(bool &) { }
|
||||
};
|
||||
|
||||
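Putting Module_channel and Module together, a module in the new framework roughly takes the following shape (sketch only, Example_module and Example_channel are hypothetical; the real modules above keep their channels in Constructible<Channel> arrays, plain members suffice here):

struct Example_channel : Module_channel
{
	Example_channel(Module_channel_id id) : Module_channel { FT_CHECK /* placeholder module id */, id } { }

	void execute(bool &) { /* drive this channel's state machine */ }
};

struct Example_module : Module
{
	Example_channel _chan_0 { 0 };
	Example_channel _chan_1 { 1 };

	Example_module()
	{
		add_channel(_chan_0);
		add_channel(_chan_1);
	}

	void execute(bool &progress) override
	{
		for_each_channel<Example_channel>([&] (Example_channel &chan) {
			chan.execute(progress); });
	}
};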
|
||||
@@ -200,92 +213,11 @@ class Tresor::Module_composition
|
||||
|
||||
public:
|
||||
|
||||
void add_module(Module_id module_id,
|
||||
Module &module)
|
||||
{
|
||||
if (module_id > MAX_MODULE_ID) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
if (_module_ptrs[module_id] != nullptr) {
|
||||
class Exception_2 { };
|
||||
throw Exception_2 { };
|
||||
}
|
||||
_module_ptrs[module_id] = &module;
|
||||
}
|
||||
void add_module(Module_id module_id, Module &mod);
|
||||
|
||||
void remove_module(Module_id module_id)
|
||||
{
|
||||
if (module_id > MAX_MODULE_ID) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
if (_module_ptrs[module_id] == nullptr) {
|
||||
class Exception_2 { };
|
||||
throw Exception_2 { };
|
||||
}
|
||||
_module_ptrs[module_id] = nullptr;
|
||||
}
|
||||
void remove_module(Module_id module_id);
|
||||
|
||||
void execute_modules(bool &progress)
|
||||
{
|
||||
for (Module_id id { 0 }; id <= MAX_MODULE_ID; id++) {
|
||||
|
||||
if (_module_ptrs[id] == nullptr)
|
||||
continue;
|
||||
|
||||
Module *module_ptr { _module_ptrs[id] };
|
||||
module_ptr->execute(progress);
|
||||
module_ptr->for_each_generated_request([&] (Module_request &req) {
|
||||
if (req.dst_module_id() > MAX_MODULE_ID) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
if (_module_ptrs[req.dst_module_id()] == nullptr) {
|
||||
class Exception_2 { };
|
||||
throw Exception_2 { };
|
||||
}
|
||||
Module &dst_module { *_module_ptrs[req.dst_module_id()] };
|
||||
if (!dst_module.ready_to_submit_request()) {
|
||||
|
||||
if (VERBOSE_MODULE_COMMUNICATION)
|
||||
log(
|
||||
module_name(id), " ", req.src_request_id_str(),
|
||||
" --", req, "-| ",
|
||||
module_name(req.dst_module_id()));
|
||||
|
||||
return Module::REQUEST_NOT_HANDLED;
|
||||
}
|
||||
dst_module.submit_request(req);
|
||||
|
||||
if (VERBOSE_MODULE_COMMUNICATION)
|
||||
log(
|
||||
module_name(id), " ", req.src_request_id_str(),
|
||||
" --", req, "--> ",
|
||||
module_name(req.dst_module_id()), " ",
|
||||
req.dst_request_id_str());
|
||||
|
||||
progress = true;
|
||||
return Module::REQUEST_HANDLED;
|
||||
});
|
||||
module_ptr->for_each_completed_request([&] (Module_request &req) {
|
||||
if (req.src_module_id() > MAX_MODULE_ID) {
|
||||
class Exception_3 { };
|
||||
throw Exception_3 { };
|
||||
}
|
||||
if (VERBOSE_MODULE_COMMUNICATION)
|
||||
log(
|
||||
module_name(req.src_module_id()), " ",
|
||||
req.src_request_id_str(), " <--", req,
|
||||
"-- ", module_name(id), " ",
|
||||
req.dst_request_id_str());
|
||||
|
||||
Module &src_module { *_module_ptrs[req.src_module_id()] };
|
||||
src_module.generated_request_complete(req);
|
||||
progress = true;
|
||||
});
|
||||
}
|
||||
}
|
||||
void execute_modules();
|
||||
};
|
||||
|
||||
#endif /* _TRESOR__MODULE_H_ */
|
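For illustration (not from the patch): add_module still just registers a module object under its id, while execute_modules() now runs one round over all registered modules without a caller-provided progress flag. A minimal sketch, assuming the module objects live in the component:

Module_composition composition { };
Meta_tree meta_tree { };
Ft_initializer ft_initializer { };

composition.add_module(META_TREE, meta_tree);
composition.add_module(FT_INITIALIZER, ft_initializer);

composition.execute_modules();   /* one execution round over all registered modules */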
||||
|
repos/gems/src/lib/tresor/include/tresor/noncopyable.h (new file, 21 lines)
@@ -0,0 +1,21 @@
/*
 * \brief  Macro to make a class non-copyable
 * \author Martin Stein
 * \date   2023-06-09
 */

/*
 * Copyright (C) 2023 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU Affero General Public License version 3.
 */

#ifndef _TRESOR__NONCOPYABLE_H_
#define _TRESOR__NONCOPYABLE_H_

#define NONCOPYABLE(class_name) \
	class_name(class_name const &) = delete; \
	class_name &operator = (class_name const &) = delete; \

#endif /* _TRESOR__NONCOPYABLE_H_ */
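Usage sketch (hypothetical class): the macro expands to a deleted copy constructor and a deleted copy-assignment operator for the named class.

class Example_buffer
{
	private:

		NONCOPYABLE(Example_buffer);

	public:

		Example_buffer() { }
};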
@@ -15,474 +15,166 @@
|
||||
#define _TRESOR__REQUEST_POOL_H_
|
||||
|
||||
/* tresor includes */
|
||||
#include <tresor/module.h>
|
||||
#include <tresor/types.h>
|
||||
#include <tresor/vfs_utilities.h>
|
||||
#include <tresor/superblock_control.h>
|
||||
|
||||
namespace Tresor {
|
||||
|
||||
class Request;
|
||||
class Request_pool;
|
||||
class Request_pool_request;
|
||||
class Request_pool_channel;
|
||||
|
||||
|
||||
class Request : public Module_request
|
||||
{
|
||||
friend class Request_pool;
|
||||
|
||||
public:
|
||||
|
||||
enum Operation : uint32_t {
|
||||
INVALID = 0,
|
||||
READ = 1,
|
||||
WRITE = 2,
|
||||
SYNC = 3,
|
||||
CREATE_SNAPSHOT = 4,
|
||||
DISCARD_SNAPSHOT = 5,
|
||||
REKEY = 6,
|
||||
EXTEND_VBD = 7,
|
||||
EXTEND_FT = 8,
|
||||
RESUME_REKEYING = 10,
|
||||
DEINITIALIZE = 11,
|
||||
INITIALIZE = 12,
|
||||
};
|
||||
|
||||
private:
|
||||
|
||||
Operation _operation;
|
||||
bool _success;
|
||||
uint64_t _block_number;
|
||||
uint64_t _offset;
|
||||
Number_of_blocks _count;
|
||||
uint32_t _key_id;
|
||||
uint32_t _tag;
|
||||
Generation _gen;
|
||||
|
||||
public:
|
||||
|
||||
Request(Operation operation,
|
||||
bool success,
|
||||
uint64_t block_number,
|
||||
uint64_t offset,
|
||||
Number_of_blocks count,
|
||||
uint32_t key_id,
|
||||
uint32_t tag,
|
||||
Generation gen,
|
||||
Module_id src_module_id,
|
||||
Module_request_id src_request_id)
|
||||
:
|
||||
Module_request { src_module_id, src_request_id, REQUEST_POOL },
|
||||
_operation { operation },
|
||||
_success { success },
|
||||
_block_number { block_number },
|
||||
_offset { offset },
|
||||
_count { count },
|
||||
_key_id { key_id },
|
||||
_tag { tag },
|
||||
_gen { gen }
|
||||
{ }
|
||||
|
||||
Request()
|
||||
:
|
||||
Module_request { },
|
||||
_operation { Operation::INVALID },
|
||||
_success { false },
|
||||
_block_number { 0 },
|
||||
_offset { 0 },
|
||||
_count { 0 },
|
||||
_key_id { 0 },
|
||||
_tag { 0 },
|
||||
_gen { 0 }
|
||||
{ }
|
||||
|
||||
bool valid() const
|
||||
{
|
||||
return _operation != Operation::INVALID;
|
||||
}
|
||||
|
||||
|
||||
/***************
|
||||
** Accessors **
|
||||
***************/
|
||||
|
||||
bool read() const { return _operation == Operation::READ; }
|
||||
bool write() const { return _operation == Operation::WRITE; }
|
||||
bool sync() const { return _operation == Operation::SYNC; }
|
||||
bool create_snapshot() const { return _operation == Operation::CREATE_SNAPSHOT; }
|
||||
bool discard_snapshot() const { return _operation == Operation::DISCARD_SNAPSHOT; }
|
||||
bool rekey() const { return _operation == Operation::REKEY; }
|
||||
bool extend_vbd() const { return _operation == Operation::EXTEND_VBD; }
|
||||
bool extend_ft() const { return _operation == Operation::EXTEND_FT; }
|
||||
bool resume_rekeying() const { return _operation == Operation::RESUME_REKEYING; }
|
||||
bool deinitialize() const { return _operation == Operation::DEINITIALIZE; }
|
||||
bool initialize() const { return _operation == Operation::INITIALIZE; }
|
||||
|
||||
Operation operation() const { return _operation; }
|
||||
bool success() const { return _success; }
|
||||
uint64_t block_number() const { return _block_number; }
|
||||
uint64_t offset() const { return _offset; }
|
||||
Number_of_blocks count() const { return _count; }
|
||||
uint32_t key_id() const { return _key_id; }
|
||||
uint32_t tag() const { return _tag; }
|
||||
Generation gen() const { return _gen; }
|
||||
|
||||
void offset(uint64_t arg) { _offset = arg; }
|
||||
void success(bool arg) { _success = arg; }
|
||||
void tag(uint32_t arg) { _tag = arg; }
|
||||
void gen(Generation arg) { _gen = arg; }
|
||||
|
||||
static char const *op_to_string(Operation op);
|
||||
|
||||
|
||||
/********************
|
||||
** Module_request **
|
||||
********************/
|
||||
|
||||
void print(Output &out) const override
|
||||
{
|
||||
Genode::print(out, op_to_string(_operation));
|
||||
switch (_operation) {
|
||||
case READ:
|
||||
case WRITE:
|
||||
case SYNC:
|
||||
if (_count > 1)
|
||||
Genode::print(out, " vbas ", _block_number, "..", _block_number + _count - 1);
|
||||
else
|
||||
Genode::print(out, " vba ", _block_number);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
};
|
||||
class Request_pool_channel_queue;
|
||||
}
|
||||
|
||||
class Tresor::Request_pool_channel
|
||||
class Tresor::Request : public Module_request
|
||||
{
|
||||
private:
|
||||
NONCOPYABLE(Request);
|
||||
|
||||
friend class Request_pool;
|
||||
|
||||
enum State {
|
||||
INVALID,
|
||||
SUBMITTED,
|
||||
SUBMITTED_RESUME_REKEYING,
|
||||
REKEY_INIT_PENDING,
|
||||
REKEY_INIT_IN_PROGRESS,
|
||||
REKEY_INIT_COMPLETE,
|
||||
PREPONE_REQUESTS_PENDING,
|
||||
PREPONE_REQUESTS_COMPLETE,
|
||||
VBD_EXTENSION_STEP_PENDING,
|
||||
FT_EXTENSION_STEP_PENDING,
|
||||
TREE_EXTENSION_STEP_IN_PROGRESS,
|
||||
TREE_EXTENSION_STEP_COMPLETE,
|
||||
CREATE_SNAP_AT_SB_CTRL_PENDING,
|
||||
CREATE_SNAP_AT_SB_CTRL_IN_PROGRESS,
|
||||
CREATE_SNAP_AT_SB_CTRL_COMPLETE,
|
||||
SYNC_AT_SB_CTRL_PENDING,
|
||||
SYNC_AT_SB_CTRL_IN_PROGRESS,
|
||||
SYNC_AT_SB_CTRL_COMPLETE,
|
||||
READ_VBA_AT_SB_CTRL_PENDING,
|
||||
READ_VBA_AT_SB_CTRL_IN_PROGRESS,
|
||||
READ_VBA_AT_SB_CTRL_COMPLETE,
|
||||
WRITE_VBA_AT_SB_CTRL_PENDING,
|
||||
WRITE_VBA_AT_SB_CTRL_IN_PROGRESS,
|
||||
WRITE_VBA_AT_SB_CTRL_COMPLETE,
|
||||
DISCARD_SNAP_AT_SB_CTRL_PENDING,
|
||||
DISCARD_SNAP_AT_SB_CTRL_IN_PROGRESS,
|
||||
DISCARD_SNAP_AT_SB_CTRL_COMPLETE,
|
||||
REKEY_VBA_PENDING,
|
||||
REKEY_VBA_IN_PROGRESS,
|
||||
REKEY_VBA_COMPLETE,
|
||||
INITIALIZE_SB_CTRL_PENDING,
|
||||
INITIALIZE_SB_CTRL_IN_PROGRESS,
|
||||
INITIALIZE_SB_CTRL_COMPLETE,
|
||||
DEINITIALIZE_SB_CTRL_PENDING,
|
||||
DEINITIALIZE_SB_CTRL_IN_PROGRESS,
|
||||
DEINITIALIZE_SB_CTRL_COMPLETE,
|
||||
COMPLETE
|
||||
};
|
||||
|
||||
enum Tag_type {
|
||||
TAG_POOL_SB_CTRL_TREE_EXT_STEP,
|
||||
TAG_POOL_SB_CTRL_READ_VBA,
|
||||
TAG_POOL_SB_CTRL_WRITE_VBA,
|
||||
TAG_POOL_SB_CTRL_SYNC,
|
||||
TAG_POOL_SB_CTRL_INITIALIZE,
|
||||
TAG_POOL_SB_CTRL_DEINITIALIZE,
|
||||
TAG_POOL_SB_CTRL_INIT_REKEY,
|
||||
TAG_POOL_SB_CTRL_REKEY_VBA,
|
||||
TAG_POOL_SB_CTRL_CREATE_SNAP,
|
||||
TAG_POOL_SB_CTRL_DISCARD_SNAP
|
||||
};
|
||||
|
||||
using Pool_index = uint32_t;
|
||||
|
||||
struct Generated_prim {
|
||||
enum Type { READ, WRITE };
|
||||
|
||||
Type op;
|
||||
bool succ;
|
||||
Tag_type tg;
|
||||
Pool_index pl_idx;
|
||||
uint64_t blk_nr;
|
||||
uint64_t idx;
|
||||
};
|
||||
|
||||
Tresor::Request _request { };
|
||||
State _state { INVALID };
|
||||
Generated_prim _prim { };
|
||||
uint64_t _nr_of_blks { 0 };
|
||||
Superblock::State _sb_state { Superblock::INVALID };
|
||||
uint32_t _nr_of_requests_preponed { 0 };
|
||||
bool _request_finished { false };
|
||||
|
||||
void invalidate()
|
||||
{
|
||||
_request = { };
|
||||
_state = { INVALID };
|
||||
_prim = { };
|
||||
_nr_of_blks = 0;
|
||||
_sb_state = { Superblock::INVALID };
|
||||
}
|
||||
};
|
||||
|
||||
class Tresor::Request_pool : public Module
|
||||
{
|
||||
private:
|
||||
|
||||
enum { MAX_NR_OF_REQUESTS_PREPONED_AT_A_TIME = 8 };
|
||||
|
||||
using Channel = Request_pool_channel;
|
||||
using Request = Tresor::Request;
|
||||
using Slots_index = uint32_t;
|
||||
using Pool_index = Channel::Pool_index;
|
||||
using Generated_prim = Channel::Generated_prim;
|
||||
|
||||
enum { NR_OF_CHANNELS = 16 };
|
||||
|
||||
struct Index_queue
|
||||
{
|
||||
using Index = Slots_index;
|
||||
|
||||
Slots_index _head { 0 };
|
||||
Slots_index _tail { 0 };
|
||||
unsigned _nr_of_used_slots { 0 };
|
||||
Slots_index _slots[NR_OF_CHANNELS] { 0 };
|
||||
|
||||
bool empty() const { return _nr_of_used_slots == 0; }
|
||||
|
||||
bool full() const {
|
||||
return _nr_of_used_slots >= NR_OF_CHANNELS; }
|
||||
|
||||
Slots_index head() const
|
||||
{
|
||||
if (empty()) {
|
||||
class Index_queue_empty_head { };
|
||||
throw Index_queue_empty_head { };
|
||||
}
|
||||
return _slots[_head];
|
||||
}
|
||||
|
||||
void enqueue(Slots_index const idx)
|
||||
{
|
||||
if (full()) {
|
||||
class Index_queue_enqueue_full { };
|
||||
throw Index_queue_enqueue_full { };
|
||||
}
|
||||
|
||||
_slots[_tail] = idx;
|
||||
|
||||
_tail = (_tail + 1) % NR_OF_CHANNELS;
|
||||
|
||||
_nr_of_used_slots += 1;
|
||||
}
|
||||
|
||||
void move_one_item_towards_tail(Index idx)
|
||||
{
|
||||
Slots_index slot_idx { _head };
|
||||
Slots_index next_slot_idx;
|
||||
Index next_idx;
|
||||
|
||||
if (empty()) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
while (true) {
|
||||
|
||||
if (slot_idx < NR_OF_CHANNELS - 1)
|
||||
next_slot_idx = slot_idx + 1;
|
||||
else
|
||||
next_slot_idx = 0;
|
||||
|
||||
if (next_slot_idx == _tail) {
|
||||
class Exception_2 { };
|
||||
throw Exception_2 { };
|
||||
}
|
||||
if (_slots[slot_idx] == idx) {
|
||||
next_idx = _slots[next_slot_idx];
|
||||
_slots[next_slot_idx] = _slots[slot_idx];
|
||||
_slots[slot_idx] = next_idx;
|
||||
return;
|
||||
} else
|
||||
slot_idx = next_slot_idx;
|
||||
}
|
||||
}
|
||||
|
||||
bool item_is_tail(Slots_index idx) const
|
||||
{
|
||||
Slots_index slot_idx;
|
||||
|
||||
if (empty()) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
if (_tail > 0)
|
||||
slot_idx = _tail - 1;
|
||||
else
|
||||
slot_idx = NR_OF_CHANNELS - 1;
|
||||
|
||||
return _slots[slot_idx] == idx;
|
||||
}
|
||||
|
||||
Index next_item(Index idx) const
|
||||
{
|
||||
Slots_index slot_idx { _head };
|
||||
Slots_index next_slot_idx;
|
||||
if (empty()) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
while (true) {
|
||||
|
||||
if (slot_idx < NR_OF_CHANNELS - 1)
|
||||
next_slot_idx = slot_idx + 1;
|
||||
else
|
||||
next_slot_idx = 0;
|
||||
|
||||
if (next_slot_idx == _tail) {
|
||||
class Exception_2 { };
|
||||
throw Exception_2 { };
|
||||
}
|
||||
if (_slots[slot_idx] == idx)
|
||||
return _slots[next_slot_idx];
|
||||
else
|
||||
slot_idx = next_slot_idx;
|
||||
}
|
||||
}
|
||||
|
||||
void dequeue(Slots_index const idx)
|
||||
{
|
||||
if (empty() or head() != idx) {
|
||||
class Index_queue_dequeue_error { };
|
||||
throw Index_queue_dequeue_error { };
|
||||
}
|
||||
|
||||
_head = (_head + 1) % NR_OF_CHANNELS;
|
||||
|
||||
_nr_of_used_slots -= 1;
|
||||
}
|
||||
};
|
||||
|
||||
static char const *_state_to_step_label(Channel::State state);
|
||||
|
||||
void _mark_req_successful(Channel &chan,
|
||||
Slots_index idx,
|
||||
bool &progress);
|
||||
|
||||
bool _handle_failed_generated_req(Channel &chan,
|
||||
bool &progress);
|
||||
|
||||
void _mark_req_failed(Channel &chan,
|
||||
bool &progress,
|
||||
char const *str);
|
||||
|
||||
Channel _channels[NR_OF_CHANNELS] { };
|
||||
Index_queue _indices { };
|
||||
|
||||
void _execute_read (Channel &, Index_queue &, Slots_index const, bool &);
|
||||
|
||||
void _execute_write(Channel &, Index_queue &, Slots_index const, bool &);
|
||||
|
||||
void _execute_sync (Channel &, Index_queue &, Slots_index const, bool &);
|
||||
|
||||
void _execute_create_snap(Channel &channel,
|
||||
Index_queue &indices,
|
||||
Slots_index const idx,
|
||||
bool &progress);
|
||||
|
||||
void _execute_discard_snap(Channel &channel,
|
||||
Index_queue &indices,
|
||||
Slots_index const idx,
|
||||
bool &progress);
|
||||
|
||||
void _execute_rekey(Channel &chan,
|
||||
Index_queue &indices,
|
||||
Slots_index idx,
|
||||
bool &progress);
|
||||
|
||||
void _execute_extend_tree(Channel &chan,
|
||||
Slots_index idx,
|
||||
Channel::State tree_ext_step_pending,
|
||||
bool &progress);
|
||||
|
||||
void _execute_initialize(Channel &, Index_queue &, Slots_index const,
|
||||
bool &);
|
||||
void _execute_deinitialize(Channel &, Index_queue &, Slots_index const,
|
||||
bool &);
|
||||
|
||||
|
||||
/************
|
||||
** Module **
|
||||
************/
|
||||
|
||||
bool _peek_completed_request(uint8_t *buf_ptr,
|
||||
size_t buf_size) override;
|
||||
|
||||
void _drop_completed_request(Module_request &req) override;
|
||||
|
||||
void execute(bool &) override;
|
||||
|
||||
bool _peek_generated_request(uint8_t *buf_ptr,
|
||||
size_t buf_size) override;
|
||||
|
||||
void _drop_generated_request(Module_request &mod_req) override;
|
||||
|
||||
void generated_request_complete(Module_request &req) override;
|
||||
friend class Request_pool_channel;
|
||||
|
||||
public:
|
||||
|
||||
Request_pool();
|
||||
enum Operation {
|
||||
READ, WRITE, SYNC, CREATE_SNAPSHOT, DISCARD_SNAPSHOT, REKEY, EXTEND_VBD,
|
||||
EXTEND_FT, RESUME_REKEYING, DEINITIALIZE, INITIALIZE, };
|
||||
|
||||
private:
|
||||
|
||||
/************
|
||||
** Module **
|
||||
************/
|
||||
Operation _op;
|
||||
Virtual_block_address const _vba;
|
||||
Request_offset const _offset;
|
||||
Number_of_blocks const _count;
|
||||
Key_id const _key_id;
|
||||
Request_tag const _tag;
|
||||
Generation &_gen;
|
||||
bool &_success;
|
||||
|
||||
bool ready_to_submit_request() override { return !_indices.full(); }
|
||||
public:
|
||||
|
||||
void submit_request(Module_request &req) override;
|
||||
static char const *op_to_string(Operation);
|
||||
|
||||
Request(Module_id, Module_channel_id, Operation, Virtual_block_address, Request_offset,
|
||||
Number_of_blocks, Key_id, Request_tag, Generation &, bool &);
|
||||
|
||||
void print(Output &) const override;
|
||||
};
|
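Illustration (not from the patch): a front-end request now carries references for its out-parameters, in the same way as the _init_req member of Request_pool further below; the module/channel ids and block values here are placeholders.

Generation gen { INVALID_GENERATION };
bool success { false };
Request read_req { SPLITTER, 0, Request::READ, /* vba */ 42, /* offset */ 0,
                   /* count */ 1, /* key id */ 0, /* tag */ 0, gen, success };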
||||
|
||||
class Tresor::Request_pool_channel : public Module_channel
|
||||
{
|
||||
private:
|
||||
|
||||
enum State : State_uint {
|
||||
INVALID, REQ_SUBMITTED, REQ_RESUMED, REQ_GENERATED, REKEY_INIT_SUCCEEDED, PREPONED_REQUESTS_COMPLETE,
|
||||
TREE_EXTENSION_STEP_SUCCEEDED, FORWARD_TO_SB_CTRL_SUCCEEDED, ACCESS_VBA_AT_SB_CTRL_SUCCEEDED,
|
||||
REKEY_VBA_SUCCEEDED, INITIALIZE_SB_CTRL_SUCCEEDED, DEINITIALIZE_SB_CTRL_SUCCEEDED, REQ_COMPLETE };
|
||||
|
||||
State _state { INVALID };
|
||||
Number_of_blocks _num_blks { 0 };
|
||||
Superblock::State _sb_state { Superblock::INVALID };
|
||||
uint32_t _num_requests_preponed { 0 };
|
||||
bool _request_finished { false };
|
||||
bool _generated_req_success { false };
|
||||
Request_pool_channel_queue &_chan_queue;
|
||||
Request *_req_ptr { nullptr };
|
||||
|
||||
NONCOPYABLE(Request_pool_channel);
|
||||
|
||||
void _generated_req_completed(State_uint) override;
|
||||
|
||||
void _request_submitted(Module_request &req) override;
|
||||
|
||||
bool _request_complete() override { return _state == REQ_COMPLETE; }
|
||||
|
||||
void _access_vbas(bool &, Superblock_control_request::Type);
|
||||
|
||||
void _forward_to_sb_ctrl(bool &, Superblock_control_request::Type);
|
||||
|
||||
void _gen_sb_control_req(bool &, Superblock_control_request::Type, State, Virtual_block_address);
|
||||
|
||||
void _rekey(bool &);
|
||||
|
||||
void _mark_req_successful(bool &);
|
||||
|
||||
void _reset();
|
||||
|
||||
void _try_prepone_requests(bool &);
|
||||
|
||||
void _extend_tree(Superblock_control_request::Type, bool &);
|
||||
|
||||
void _initialize(bool &);
|
||||
|
||||
void _resume_request(bool &, Request::Operation);
|
||||
|
||||
public:
|
||||
|
||||
Request_pool_channel(Module_channel_id id, Request_pool_channel_queue &chan_queue) : Module_channel { REQUEST_POOL, id }, _chan_queue { chan_queue } { }
|
||||
|
||||
void execute(bool &);
|
||||
};
|
||||
|
||||
|
||||
inline char const *to_string(Tresor::Request::Operation op)
{
	switch (op) {
	case Tresor::Request::INVALID: return "invalid";
	case Tresor::Request::READ: return "read";
	case Tresor::Request::WRITE: return "write";
	case Tresor::Request::SYNC: return "sync";
	case Tresor::Request::CREATE_SNAPSHOT: return "create_snapshot";
	case Tresor::Request::DISCARD_SNAPSHOT: return "discard_snapshot";
	case Tresor::Request::REKEY: return "rekey";
	case Tresor::Request::EXTEND_VBD: return "extend_vbd";
	case Tresor::Request::EXTEND_FT: return "extend_ft";
	case Tresor::Request::RESUME_REKEYING: return "resume_rekeying";
	case Tresor::Request::DEINITIALIZE: return "deinitialize";
	case Tresor::Request::INITIALIZE: return "initialize";
	}
	class Exception_1 { };
	throw Exception_1 { };
}


class Tresor::Request_pool_channel_queue
{
	NONCOPYABLE(Request_pool_channel_queue);

	public:

		enum { NUM_SLOTS = 16 };

	private:

		using Channel = Request_pool_channel;
		using Slot_index = uint64_t;
		using Number_of_slots = uint64_t;

		Slot_index _head { 0 };
		Slot_index _tail { 0 };
		Number_of_slots _num_used_slots { 0 };
		Channel *_slots[NUM_SLOTS] { 0 };

	public:

		Request_pool_channel_queue() { }

		bool empty() const { return _num_used_slots == 0; }

		bool full() const { return _num_used_slots >= NUM_SLOTS; }

		Channel &head() const;

		void enqueue(Channel &);

		void move_one_slot_towards_tail(Channel const &);

		bool is_tail(Channel const &) const;

		Channel &next(Channel const &) const;

		void dequeue(Channel const &);
};
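/*
 * Illustration (not part of the patch): only the declarations are shown
 * above. A queue over a fixed array with _head, _tail and _num_used_slots
 * is commonly realized as a ring buffer; a hypothetical sketch of the
 * enqueue step (not the actual method body) could look like this:
 *
 *   void enqueue(Channel &chan)
 *   {
 *       ASSERT(!full());
 *       _slots[_tail] = &chan;
 *       _tail = (_tail + 1) % NUM_SLOTS;
 *       _num_used_slots++;
 *   }
 */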
|
||||
|
||||
|
||||
class Tresor::Request_pool : public Module
|
||||
{
|
||||
NONCOPYABLE(Request_pool);
|
||||
|
||||
private:
|
||||
|
||||
using Channel = Request_pool_channel;
|
||||
|
||||
enum { NUM_CHANNELS = Request_pool_channel_queue::NUM_SLOTS };
|
||||
|
||||
bool _init_success { false };
|
||||
Generation _init_gen { INVALID_GENERATION };
|
||||
Request _init_req { INVALID_MODULE_ID, INVALID_MODULE_CHANNEL_ID, Request::INITIALIZE, 0, 0, 0, 0, 0, _init_gen, _init_success };
|
||||
Constructible<Channel> _channels[NUM_CHANNELS] { };
|
||||
Request_pool_channel_queue _chan_queue { };
|
||||
|
||||
public:
|
||||
|
||||
void execute(bool &) override;
|
||||
|
||||
Request_pool();
|
||||
};
|
||||
|
||||
#endif /* _TRESOR__REQUEST_POOL_H_ */
|
||||
|
@ -14,12 +14,8 @@
|
||||
#ifndef _TRESOR__SB_CHECK_H_
|
||||
#define _TRESOR__SB_CHECK_H_
|
||||
|
||||
/* base includes */
|
||||
#include <base/output.h>
|
||||
|
||||
/* tresor includes */
|
||||
#include <tresor/types.h>
|
||||
#include <tresor/module.h>
|
||||
|
||||
namespace Tresor {
|
||||
|
||||
@ -31,78 +27,66 @@ namespace Tresor {
|
||||
|
||||
class Tresor::Sb_check_request : public Module_request
|
||||
{
|
||||
public:
|
||||
|
||||
enum Type { INVALID = 0, CHECK = 1, };
|
||||
friend class Sb_check_channel;
|
||||
|
||||
private:
|
||||
|
||||
friend class Sb_check;
|
||||
friend class Sb_check_channel;
|
||||
|
||||
Type _type { INVALID };
|
||||
bool _success { false };
|
||||
bool &_success;
|
||||
|
||||
NONCOPYABLE(Sb_check_request);
|
||||
|
||||
public:
|
||||
|
||||
Sb_check_request() { }
|
||||
Sb_check_request(Module_id, Module_channel_id, bool &);
|
||||
|
||||
Sb_check_request(Module_id src_module_id,
|
||||
Module_request_id src_request_id);
|
||||
|
||||
static void create(void *buf_ptr,
|
||||
size_t buf_size,
|
||||
uint64_t src_module_id,
|
||||
uint64_t src_request_id,
|
||||
size_t req_type);
|
||||
|
||||
Type type() const { return _type; }
|
||||
|
||||
bool success() const { return _success; }
|
||||
|
||||
static char const *type_to_string(Type type);
|
||||
|
||||
|
||||
/********************
|
||||
** Module_request **
|
||||
********************/
|
||||
|
||||
void print(Output &out) const override { Genode::print(out, type_to_string(_type)); }
|
||||
void print(Output &out) const override { Genode::print(out, "check"); }
|
||||
};
|
||||
|
||||
|
||||
class Tresor::Sb_check_channel
|
||||
class Tresor::Sb_check_channel : public Module_channel
|
||||
{
|
||||
private:
|
||||
|
||||
friend class Sb_check;
|
||||
|
||||
using Request = Sb_check_request;
|
||||
|
||||
enum State { INSPECT_SBS, CHECK_SB };
|
||||
enum State { REQ_SUBMITTED, REQ_COMPLETE, READ_BLK_SUCCESSFUL, REQ_GENERATED, CHECK_VBD_SUCCESSFUL, CHECK_FT_SUCCESSFUL, CHECK_MT_SUCCESSFUL};
|
||||
|
||||
enum Sb_slot_state {
|
||||
INACTIVE, INIT, DONE,
|
||||
READ_STARTED, READ_DROPPED, READ_DONE,
|
||||
VBD_CHECK_STARTED, VBD_CHECK_DROPPED, VBD_CHECK_DONE,
|
||||
FT_CHECK_STARTED, FT_CHECK_DROPPED, FT_CHECK_DONE,
|
||||
MT_CHECK_STARTED, MT_CHECK_DROPPED, MT_CHECK_DONE };
|
||||
State _state { REQ_COMPLETE };
|
||||
Request *_req_ptr { };
|
||||
Generation _highest_gen { 0 };
|
||||
Superblock_index _highest_gen_sb_idx { 0 };
|
||||
bool _scan_for_highest_gen_sb_done { false };
|
||||
Superblock_index _sb_idx { 0 };
|
||||
Superblock _sb { };
|
||||
Snapshot_index _snap_idx { 0 };
|
||||
Constructible<Tree_root> _tree_root { };
|
||||
Block _blk { };
|
||||
bool _generated_req_success { false };
|
||||
|
||||
State _state { INSPECT_SBS };
|
||||
Request _request { };
|
||||
Generation _highest_gen { 0 };
|
||||
Superblock_index _last_sb_slot_idx { 0 };
|
||||
Sb_slot_state _sb_slot_state { INACTIVE };
|
||||
Superblock_index _sb_slot_idx { 0 };
|
||||
Superblock _sb_slot { };
|
||||
Snapshot_index _snap_idx { 0 };
|
||||
Type_1_node _vbd { };
|
||||
Type_1_node _ft { };
|
||||
Type_1_node _mt { };
|
||||
Physical_block_address _gen_prim_blk_nr { 0 };
|
||||
bool _gen_prim_success { false };
|
||||
Block _encoded_blk { };
|
||||
NONCOPYABLE(Sb_check_channel);
|
||||
|
||||
void _generated_req_completed(State_uint) override;
|
||||
|
||||
void _request_submitted(Module_request &) override;
|
||||
|
||||
bool _request_complete() override { return _state == REQ_COMPLETE; }
|
||||
|
||||
template <typename REQUEST, typename... ARGS>
|
||||
void _generate_req(State_uint state, bool &progress, ARGS &&... args)
|
||||
{
|
||||
_state = REQ_GENERATED;
|
||||
generate_req<REQUEST>(state, progress, args..., _generated_req_success);
|
||||
}
|
||||
|
||||
void _mark_req_failed(bool &, char const *);
|
||||
|
||||
void _mark_req_successful(bool &);
|
||||
|
||||
public:
|
||||
|
||||
Sb_check_channel(Module_channel_id id) : Module_channel { SB_CHECK, id } { }
|
||||
|
||||
void execute(bool &);
|
||||
};
|
||||
|
||||
|
||||
@ -110,58 +94,17 @@ class Tresor::Sb_check : public Module
|
||||
{
|
||||
private:
|
||||
|
||||
using Request = Sb_check_request;
|
||||
using Channel = Sb_check_channel;
|
||||
|
||||
enum { NR_OF_CHANNELS = 1 };
|
||||
|
||||
Channel _channels[NR_OF_CHANNELS] { };
|
||||
|
||||
static char const *_state_to_step_label(Channel::Sb_slot_state state);
|
||||
|
||||
bool _handle_failed_generated_req(Channel &chan,
|
||||
bool &progress);
|
||||
|
||||
void _execute_check(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
void _mark_req_failed(Channel &channel,
|
||||
bool &progress,
|
||||
char const *str);
|
||||
|
||||
void _mark_req_successful(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
|
||||
/************
|
||||
** Module **
|
||||
************/
|
||||
|
||||
bool _peek_completed_request(uint8_t *buf_ptr,
|
||||
size_t buf_size) override;
|
||||
|
||||
void _drop_completed_request(Module_request &req) override;
|
||||
|
||||
bool _peek_generated_request(uint8_t *buf_ptr,
|
||||
size_t buf_size) override;
|
||||
|
||||
void _drop_generated_request(Module_request &mod_req) override;
|
||||
|
||||
void generated_request_complete(Module_request &req) override;
|
||||
Constructible<Channel> _channels[1] { };
|
||||
|
||||
NONCOPYABLE(Sb_check);
|
||||
|
||||
public:
|
||||
|
||||
/************
|
||||
** Module **
|
||||
************/
|
||||
|
||||
bool ready_to_submit_request() override;
|
||||
|
||||
void submit_request(Module_request &req) override;
|
||||
Sb_check();
|
||||
|
||||
void execute(bool &) override;
|
||||
|
||||
};
|
||||
|
||||
#endif /* _TRESOR__SB_CHECK_H_ */
|
||||
|
@@ -1,5 +1,6 @@
/*
* \brief Module for initializing the superblocks of a new Tresor
* \author Martin Stein
* \author Josef Soentgen
* \date 2023-03-14
*/
|
||||
@ -14,11 +15,8 @@
|
||||
#ifndef _TRESOR__SB_INITIALIZER_H_
|
||||
#define _TRESOR__SB_INITIALIZER_H_
|
||||
|
||||
/* base includes */
|
||||
#include <base/output.h>
|
||||
|
||||
/* tresor includes */
|
||||
#include <tresor/module.h>
|
||||
#include <tresor/types.h>
|
||||
|
||||
namespace Tresor {
|
||||
|
||||
@ -30,124 +28,78 @@ namespace Tresor {
|
||||
|
||||
class Tresor::Sb_initializer_request : public Module_request
|
||||
{
|
||||
public:
|
||||
|
||||
enum Type { INVALID = 0, INIT = 1, };
|
||||
friend class Sb_initializer_channel;
|
||||
|
||||
private:
|
||||
|
||||
friend class Sb_initializer;
|
||||
friend class Sb_initializer_channel;
|
||||
|
||||
Type _type { INVALID };
|
||||
Tree_level_index _vbd_max_level_idx { 0 };
|
||||
Tree_degree _vbd_max_child_idx { 0 };
|
||||
Number_of_leaves _vbd_nr_of_leaves { 0 };
|
||||
Tree_level_index _ft_max_level_idx { 0 };
|
||||
Tree_degree _ft_max_child_idx { 0 };
|
||||
Number_of_leaves _ft_nr_of_leaves { 0 };
|
||||
Tree_level_index _mt_max_level_idx { 0 };
|
||||
Tree_degree _mt_max_child_idx { 0 };
|
||||
Number_of_leaves _mt_nr_of_leaves { 0 };
|
||||
bool _success { false };
|
||||
Tree_level_index _vbd_max_lvl;
|
||||
Tree_degree _vbd_degree;
|
||||
Number_of_leaves _vbd_num_leaves;
|
||||
Tree_level_index _ft_max_lvl;
|
||||
Tree_degree _ft_degree;
|
||||
Number_of_leaves _ft_num_leaves;
|
||||
Tree_level_index _mt_max_lvl;
|
||||
Tree_degree _mt_degree;
|
||||
Number_of_leaves _mt_num_leaves;
|
||||
Pba_allocator &_pba_alloc;
|
||||
bool &_success;
|
||||
|
||||
NONCOPYABLE(Sb_initializer_request);
|
||||
|
||||
public:
|
||||
|
||||
Sb_initializer_request() { }
|
||||
Sb_initializer_request(Module_id, Module_channel_id, Tree_level_index, Tree_degree, Number_of_leaves,
|
||||
Tree_level_index, Tree_degree, Number_of_leaves, Tree_level_index, Tree_degree,
|
||||
Number_of_leaves, Pba_allocator &, bool &);
|
||||
|
||||
Sb_initializer_request(Module_id src_module_id,
|
||||
Module_request_id src_request_id);
|
||||
|
||||
static void create(void *buf_ptr,
|
||||
size_t buf_size,
|
||||
uint64_t src_module_id,
|
||||
uint64_t src_request_id,
|
||||
size_t req_type,
|
||||
Tree_level_index vbd_max_level_idx,
|
||||
Tree_degree vbd_max_child_idx,
|
||||
Number_of_leaves vbd_nr_of_leaves,
|
||||
Tree_level_index ft_max_level_idx,
|
||||
Tree_degree ft_max_child_idx,
|
||||
Number_of_leaves ft_nr_of_leaves,
|
||||
Tree_level_index mt_max_level_idx,
|
||||
Tree_degree mt_max_child_idx,
|
||||
Number_of_leaves mt_nr_of_leaves);
|
||||
|
||||
Type type() const { return _type; }
|
||||
|
||||
bool success() const { return _success; }
|
||||
|
||||
static char const *type_to_string(Type type);
|
||||
|
||||
|
||||
/********************
|
||||
** Module_request **
|
||||
********************/
|
||||
|
||||
void print(Output &out) const override { Genode::print(out, type_to_string(_type)); }
|
||||
void print(Output &out) const override { Genode::print(out, "init"); }
|
||||
};
|
||||
|
||||
|
||||
class Tresor::Sb_initializer_channel
|
||||
class Tresor::Sb_initializer_channel : public Module_channel
|
||||
{
|
||||
private:
|
||||
|
||||
friend class Sb_initializer;
|
||||
using Request = Sb_initializer_request;
|
||||
|
||||
enum State {
|
||||
INACTIVE, SUBMITTED, PENDING, IN_PROGRESS, SLOT_COMPLETE, COMPLETE,
|
||||
FT_REQUEST_COMPLETE,
|
||||
FT_REQUEST_IN_PROGRESS,
|
||||
FT_REQUEST_PENDING,
|
||||
MT_REQUEST_COMPLETE,
|
||||
MT_REQUEST_IN_PROGRESS,
|
||||
MT_REQUEST_PENDING,
|
||||
SYNC_REQUEST_COMPLETE,
|
||||
SYNC_REQUEST_IN_PROGRESS,
|
||||
SYNC_REQUEST_PENDING,
|
||||
TA_REQUEST_CREATE_KEY_COMPLETE,
|
||||
TA_REQUEST_CREATE_KEY_IN_PROGRESS,
|
||||
TA_REQUEST_CREATE_KEY_PENDING,
|
||||
TA_REQUEST_ENCRYPT_KEY_COMPLETE,
|
||||
TA_REQUEST_ENCRYPT_KEY_IN_PROGRESS,
|
||||
TA_REQUEST_ENCRYPT_KEY_PENDING,
|
||||
TA_REQUEST_SECURE_SB_COMPLETE,
|
||||
TA_REQUEST_SECURE_SB_IN_PROGRESS,
|
||||
TA_REQUEST_SECURE_SB_PENDING,
|
||||
VBD_REQUEST_COMPLETE,
|
||||
VBD_REQUEST_IN_PROGRESS,
|
||||
VBD_REQUEST_PENDING,
|
||||
WRITE_REQUEST_COMPLETE,
|
||||
WRITE_REQUEST_IN_PROGRESS,
|
||||
WRITE_REQUEST_PENDING,
|
||||
};
|
||||
REQ_SUBMITTED, START_NEXT_SB, SB_COMPLETE, REQ_COMPLETE, INIT_FT_SUCCEEDED, INIT_MT_SUCCEEDED,
|
||||
WRITE_HASH_TO_TA, CREATE_KEY_SUCCEEDED, ENCRYPT_KEY_SUCCEEDED, SECURE_SB_SUCCEEDED, INIT_VBD_SUCCEEDED,
|
||||
WRITE_BLK_SUCCEEDED, REQ_GENERATED };
|
||||
|
||||
State _state { INACTIVE };
|
||||
Sb_initializer_request _request { };
|
||||
Superblock_index _sb_slot_index { 0 };
|
||||
Superblock _sb { };
|
||||
Block _encoded_blk { };
|
||||
Key _key_plain { };
|
||||
Key _key_cipher { };
|
||||
Hash _sb_hash { };
|
||||
Type_1_node _vbd_node { };
|
||||
Type_1_node _ft_node { };
|
||||
Type_1_node _mt_node { };
|
||||
bool _generated_req_success { false };
|
||||
State _state { REQ_COMPLETE };
|
||||
Request *_req_ptr { };
|
||||
Superblock_index _sb_idx { 0 };
|
||||
Superblock _sb { };
|
||||
Block _blk { };
|
||||
Hash _hash { };
|
||||
Constructible<Tree_root> _vbd { };
|
||||
Constructible<Tree_root> _mt { };
|
||||
Constructible<Tree_root> _ft { };
|
||||
bool _generated_req_success { false };
|
||||
|
||||
void clean_data()
|
||||
NONCOPYABLE(Sb_initializer_channel);
|
||||
|
||||
void _generated_req_completed(State_uint) override;
|
||||
|
||||
void _request_submitted(Module_request &) override;
|
||||
|
||||
bool _request_complete() override { return _state == REQ_COMPLETE; }
|
||||
|
||||
template <typename REQUEST, typename... ARGS>
|
||||
void _generate_req(State_uint state, bool &progress, ARGS &&... args)
|
||||
{
|
||||
_sb = Superblock { };
|
||||
|
||||
memset(&_key_plain, 0, sizeof(_key_plain));
|
||||
memset(&_key_cipher, 0, sizeof(_key_cipher));
|
||||
memset(&_sb_hash, 0, sizeof(_sb_hash));
|
||||
|
||||
memset(&_vbd_node, 0, sizeof(_vbd_node));
|
||||
memset(&_ft_node, 0, sizeof(_ft_node));
|
||||
memset(&_mt_node, 0, sizeof(_mt_node));
|
||||
_state = REQ_GENERATED;
|
||||
generate_req<REQUEST>(state, progress, args..., _generated_req_success);
|
||||
}
|
||||
|
||||
void _mark_req_successful(bool &);
|
||||
|
||||
public:
|
||||
|
||||
Sb_initializer_channel(Module_channel_id id) : Module_channel { SB_INITIALIZER, id } { }
|
||||
|
||||
void execute(bool &);
|
||||
};
|
||||
|
||||
|
||||
@ -155,62 +107,17 @@ class Tresor::Sb_initializer : public Module
|
||||
{
|
||||
private:
|
||||
|
||||
using Request = Sb_initializer_request;
|
||||
using Channel = Sb_initializer_channel;
|
||||
|
||||
enum { NR_OF_CHANNELS = 1 };
|
||||
|
||||
Channel _channels[NR_OF_CHANNELS] { };
|
||||
|
||||
void _populate_sb_slot(Channel &channel,
|
||||
Physical_block_address first,
|
||||
Number_of_blocks num);
|
||||
|
||||
void _execute(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
void _execute_init(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
void _mark_req_failed(Channel &channel,
|
||||
bool &progress,
|
||||
char const *str);
|
||||
|
||||
void _mark_req_successful(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
|
||||
/************
|
||||
** Module **
|
||||
************/
|
||||
|
||||
bool _peek_completed_request(uint8_t *buf_ptr,
|
||||
size_t buf_size) override;
|
||||
|
||||
void _drop_completed_request(Module_request &req) override;
|
||||
|
||||
bool _peek_generated_request(uint8_t *buf_ptr,
|
||||
size_t buf_size) override;
|
||||
|
||||
void _drop_generated_request(Module_request &mod_req) override;
|
||||
|
||||
void generated_request_complete(Module_request &req) override;
|
||||
Constructible<Channel> _channels[1] { };
|
||||
|
||||
NONCOPYABLE(Sb_initializer);
|
||||
|
||||
public:
|
||||
|
||||
Sb_initializer();
|
||||
|
||||
/************
|
||||
** Module **
|
||||
************/
|
||||
|
||||
bool ready_to_submit_request() override;
|
||||
|
||||
void submit_request(Module_request &req) override;
|
||||
|
||||
void execute(bool &) override;
|
||||
|
||||
};
|
||||
|
||||
#endif /* _TRESOR__SB_INITIALIZER_H_ */
|
||||
|
@ -1,30 +0,0 @@
|
||||
/*
|
||||
* \brief Calculate SHA256 hash over data blocks of a size of 4096 bytes
|
||||
* \author Martin Stein
|
||||
* \date 2023-02-13
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2023 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _TRESOR__SHAE256_4K_HASH_H_
|
||||
#define _TRESOR__SHAE256_4K_HASH_H_
|
||||
|
||||
namespace Tresor {
|
||||
|
||||
class Block;
|
||||
class Hash;
|
||||
|
||||
void calc_sha256_4k_hash(Block const &blk,
|
||||
Hash &hash);
|
||||
|
||||
|
||||
bool check_sha256_4k_hash(Block const &blk,
|
||||
Hash const &expected_hash);
|
||||
}
|
||||
|
||||
#endif /* _TRESOR__SHAE256_4K_HASH_ */
|
@@ -1,5 +1,5 @@
/*
* \brief Module for management of the superblocks
* \brief Module for accessing and managing the superblocks
* \author Martin Stein
* \date 2023-02-13
*/
|
||||
@ -16,7 +16,9 @@
|
||||
|
||||
/* tresor includes */
|
||||
#include <tresor/types.h>
|
||||
#include <tresor/module.h>
|
||||
#include <tresor/virtual_block_device.h>
|
||||
#include <tresor/trust_anchor.h>
|
||||
#include <tresor/block_io.h>
|
||||
|
||||
namespace Tresor {
|
||||
|
||||
@ -25,378 +27,157 @@ namespace Tresor {
|
||||
class Superblock_control_channel;
|
||||
}
|
||||
|
||||
class Tresor::Superblock_control_request : public Module_request
|
||||
class Tresor::Superblock_control_request : Module_request, Noncopyable
|
||||
{
|
||||
friend class Superblock_control_channel;
|
||||
|
||||
public:
|
||||
|
||||
enum Type {
|
||||
INVALID = 0, READ_VBA = 1, WRITE_VBA = 2, SYNC = 3, INITIALIZE = 4,
|
||||
DEINITIALIZE = 5,
|
||||
VBD_EXTENSION_STEP = 6,
|
||||
FT_EXTENSION_STEP = 7,
|
||||
CREATE_SNAPSHOT = 8,
|
||||
DISCARD_SNAPSHOT = 9,
|
||||
INITIALIZE_REKEYING = 10,
|
||||
REKEY_VBA = 11
|
||||
};
|
||||
READ_VBA, WRITE_VBA, SYNC, INITIALIZE, DEINITIALIZE, VBD_EXTENSION_STEP,
|
||||
FT_EXTENSION_STEP, CREATE_SNAPSHOT, DISCARD_SNAPSHOT, INITIALIZE_REKEYING,
|
||||
REKEY_VBA };
|
||||
|
||||
private:
|
||||
|
||||
friend class Superblock_control;
|
||||
friend class Superblock_control_channel;
|
||||
|
||||
Type _type { INVALID };
|
||||
uint64_t _client_req_offset { 0 };
|
||||
uint64_t _client_req_tag { 0 };
|
||||
Virtual_block_address _vba { 0 };
|
||||
Superblock::State _sb_state { Superblock::INVALID };
|
||||
Number_of_blocks _nr_of_blks { 0 };
|
||||
bool _success { false };
|
||||
bool _request_finished { false };
|
||||
addr_t _generation_ptr { 0 };
|
||||
Type const _type;
|
||||
Request_offset const _client_req_offset;
|
||||
Request_tag const _client_req_tag;
|
||||
Number_of_blocks _nr_of_blks;
|
||||
Virtual_block_address const _vba;
|
||||
bool &_success;
|
||||
bool &_client_req_finished;
|
||||
Superblock::State &_sb_state;
|
||||
Generation &_gen;
|
||||
|
||||
public:
|
||||
|
||||
Superblock_control_request() { }
|
||||
Superblock_control_request(Module_id, Module_channel_id, Type, Request_offset,
|
||||
Request_tag, Number_of_blocks, Virtual_block_address,
|
||||
bool &, bool &, Superblock::State &, Generation &);
|
||||
|
||||
Type type() const { return _type; }
|
||||
static char const *type_to_string(Type);
|
||||
|
||||
Superblock_control_request(Module_id src_module_id,
|
||||
Module_request_id src_request_id);
|
||||
|
||||
static void create(void *buf_ptr,
|
||||
size_t buf_size,
|
||||
uint64_t src_module_id,
|
||||
uint64_t src_request_id,
|
||||
size_t req_type,
|
||||
uint64_t client_req_offset,
|
||||
uint64_t client_req_tag,
|
||||
Number_of_blocks nr_of_blks,
|
||||
uint64_t vba,
|
||||
Generation &gen);
|
||||
|
||||
Superblock::State sb_state() { return _sb_state; }
|
||||
|
||||
Generation gen() const { return *(Generation const *)_generation_ptr; }
|
||||
|
||||
void gen(Generation g) { *(Generation *)_generation_ptr = g; }
|
||||
|
||||
bool success() const { return _success; }
|
||||
|
||||
bool request_finished() const { return _request_finished; }
|
||||
|
||||
static char const *type_to_string(Type type);
|
||||
|
||||
|
||||
/********************
|
||||
** Module_request **
|
||||
********************/
|
||||
|
||||
void print(Output &out) const override
|
||||
{
|
||||
Genode::print(out, type_to_string(_type));
|
||||
switch (_type) {
|
||||
case REKEY_VBA:
|
||||
case READ_VBA:
|
||||
case WRITE_VBA:
|
||||
Genode::print(out, " ", _vba);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
void print(Output &) const override;
|
||||
};
|
||||
|
||||
class Tresor::Superblock_control_channel
|
||||
|
||||
class Tresor::Superblock_control_channel : public Module_channel
|
||||
{
|
||||
private:
|
||||
|
||||
friend class Superblock_control;
|
||||
using Request = Superblock_control_request;
|
||||
|
||||
enum State {
|
||||
SUBMITTED,
|
||||
READ_VBA_AT_VBD_PENDING,
|
||||
READ_VBA_AT_VBD_IN_PROGRESS,
|
||||
READ_VBA_AT_VBD_COMPLETED,
|
||||
WRITE_VBA_AT_VBD_PENDING,
|
||||
WRITE_VBA_AT_VBD_IN_PROGRESS,
|
||||
WRITE_VBA_AT_VBD_COMPLETED,
|
||||
READ_SB_PENDING,
|
||||
READ_SB_IN_PROGRESS,
|
||||
READ_SB_COMPLETED,
|
||||
READ_CURRENT_SB_PENDING,
|
||||
READ_CURRENT_SB_IN_PROGRESS,
|
||||
READ_CURRENT_SB_COMPLETED,
|
||||
REKEY_VBA_IN_VBD_PENDING,
|
||||
REKEY_VBA_IN_VBD_IN_PROGRESS,
|
||||
REKEY_VBA_IN_VBD_COMPLETED,
|
||||
VBD_EXT_STEP_IN_VBD_PENDING,
|
||||
VBD_EXT_STEP_IN_VBD_IN_PROGRESS,
|
||||
FT_EXT_STEP_IN_FT_PENDING,
|
||||
FT_EXT_STEP_IN_FT_IN_PROGRESS,
|
||||
TREE_EXT_STEP_IN_TREE_COMPLETED,
|
||||
CREATE_KEY_PENDING,
|
||||
CREATE_KEY_IN_PROGRESS,
|
||||
CREATE_KEY_COMPLETED,
|
||||
ENCRYPT_CURRENT_KEY_PENDING,
|
||||
ENCRYPT_CURRENT_KEY_IN_PROGRESS,
|
||||
ENCRYPT_CURRENT_KEY_COMPLETED,
|
||||
ENCRYPT_PREVIOUS_KEY_PENDING,
|
||||
ENCRYPT_PREVIOUS_KEY_IN_PROGRESS,
|
||||
ENCRYPT_PREVIOUS_KEY_COMPLETED,
|
||||
DECRYPT_CURRENT_KEY_PENDING,
|
||||
DECRYPT_CURRENT_KEY_IN_PROGRESS,
|
||||
DECRYPT_CURRENT_KEY_COMPLETED,
|
||||
DECRYPT_PREVIOUS_KEY_PENDING,
|
||||
DECRYPT_PREVIOUS_KEY_IN_PROGRESS,
|
||||
DECRYPT_PREVIOUS_KEY_COMPLETED,
|
||||
SYNC_CACHE_PENDING,
|
||||
SYNC_CACHE_IN_PROGRESS,
|
||||
SYNC_CACHE_COMPLETED,
|
||||
ADD_KEY_AT_CRYPTO_MODULE_PENDING,
|
||||
ADD_KEY_AT_CRYPTO_MODULE_IN_PROGRESS,
|
||||
ADD_KEY_AT_CRYPTO_MODULE_COMPLETED,
|
||||
ADD_PREVIOUS_KEY_AT_CRYPTO_MODULE_PENDING,
|
||||
ADD_PREVIOUS_KEY_AT_CRYPTO_MODULE_IN_PROGRESS,
|
||||
ADD_PREVIOUS_KEY_AT_CRYPTO_MODULE_COMPLETED,
|
||||
ADD_CURRENT_KEY_AT_CRYPTO_MODULE_PENDING,
|
||||
ADD_CURRENT_KEY_AT_CRYPTO_MODULE_IN_PROGRESS,
|
||||
ADD_CURRENT_KEY_AT_CRYPTO_MODULE_COMPLETED,
|
||||
REMOVE_PREVIOUS_KEY_AT_CRYPTO_MODULE_PENDING,
|
||||
REMOVE_PREVIOUS_KEY_AT_CRYPTO_MODULE_IN_PROGRESS,
|
||||
REMOVE_PREVIOUS_KEY_AT_CRYPTO_MODULE_COMPLETED,
|
||||
REMOVE_CURRENT_KEY_AT_CRYPTO_MODULE_PENDING,
|
||||
REMOVE_CURRENT_KEY_AT_CRYPTO_MODULE_IN_PROGRESS,
|
||||
REMOVE_CURRENT_KEY_AT_CRYPTO_MODULE_COMPLETED,
|
||||
WRITE_SB_PENDING,
|
||||
WRITE_SB_IN_PROGRESS,
|
||||
WRITE_SB_COMPLETED,
|
||||
SYNC_BLK_IO_PENDING,
|
||||
SYNC_BLK_IO_IN_PROGRESS,
|
||||
SYNC_BLK_IO_COMPLETED,
|
||||
SECURE_SB_PENDING,
|
||||
SECURE_SB_IN_PROGRESS,
|
||||
SECURE_SB_COMPLETED,
|
||||
MAX_SB_HASH_PENDING,
|
||||
MAX_SB_HASH_IN_PROGRESS,
|
||||
MAX_SB_HASH_COMPLETED,
|
||||
COMPLETED
|
||||
};
|
||||
enum State : State_uint {
|
||||
INACTIVE, REQ_SUBMITTED, ACCESS_VBA_AT_VBD_SUCCEEDED,
|
||||
REKEY_VBA_AT_VBD_SUCCEEDED, CREATE_KEY_SUCCEEDED,
|
||||
TREE_EXT_STEP_IN_TREE_SUCCEEDED, DECRYPT_CURR_KEY_SUCCEEDED,
|
||||
DECRYPT_PREV_KEY_SUCCEEDED, READ_SB_HASH_SUCCEEDED, ADD_PREV_KEY_SUCCEEDED,
|
||||
ADD_CURR_KEY_SUCCEEDED, REMOVE_PREV_KEY_SUCCEEDED, REMOVE_CURR_KEY_SUCCEEDED,
|
||||
READ_SB_SUCCEEDED, REQ_COMPLETE, REQ_GENERATED, SECURE_SB, SECURE_SB_SUCCEEDED };
|
||||
|
||||
enum Tag_type {
|
||||
TAG_SB_CTRL_VBD_VBD_EXT_STEP,
|
||||
TAG_SB_CTRL_FT_FT_EXT_STEP,
|
||||
TAG_SB_CTRL_VBD_RKG_REKEY_VBA,
|
||||
TAG_SB_CTRL_VBD_RKG_READ_VBA,
|
||||
TAG_SB_CTRL_VBD_RKG_WRITE_VBA,
|
||||
TAG_SB_CTRL_TA_ENCRYPT_KEY,
|
||||
TAG_SB_CTRL_CACHE,
|
||||
TAG_SB_CTRL_BLK_IO_READ_SB,
|
||||
TAG_SB_CTRL_BLK_IO_WRITE_SB,
|
||||
TAG_SB_CTRL_BLK_IO_SYNC,
|
||||
TAG_SB_CTRL_TA_SECURE_SB,
|
||||
TAG_SB_CTRL_TA_LAST_SB_HASH,
|
||||
TAG_SB_CTRL_TA_DECRYPT_KEY,
|
||||
TAG_SB_CTRL_TA_CREATE_KEY,
|
||||
TAG_SB_CTRL_CRYPTO_ADD_KEY,
|
||||
TAG_SB_CTRL_CRYPTO_REMOVE_KEY,
|
||||
};
|
||||
enum Secure_sb_state : State_uint {
|
||||
SECURE_SB_INACTIVE, STARTED, ENCRYPT_CURR_KEY_SUCCEEDED,
|
||||
SECURE_SB_REQ_GENERATED, ENCRYPT_PREV_KEY_SUCCEEDED, SYNC_CACHE_SUCCEEDED,
|
||||
WRITE_SB_SUCCEEDED, SYNC_BLK_IO_SUCCEEDED, WRITE_SB_HASH_SUCCEEDED };
|
||||
|
||||
struct Generated_prim
|
||||
State _state { INACTIVE };
|
||||
Constructible<Tree_root> _ft { };
|
||||
Constructible<Tree_root> _mt { };
|
||||
Secure_sb_state _secure_sb_state { SECURE_SB_INACTIVE };
|
||||
Superblock _sb_ciphertext { };
|
||||
Block _blk { };
|
||||
Generation _gen { INVALID_GENERATION };
|
||||
Hash _hash { };
|
||||
Physical_block_address _pba { INVALID_PBA };
|
||||
Number_of_blocks _nr_of_leaves { 0 };
|
||||
Request *_req_ptr { nullptr };
|
||||
bool _gen_req_success { false };
|
||||
Superblock &_sb;
|
||||
Superblock_index &_sb_idx;
|
||||
Generation &_curr_gen;
|
||||
|
||||
NONCOPYABLE(Superblock_control_channel);
|
||||
|
||||
void _generated_req_completed(State_uint) override;
|
||||
|
||||
void _request_submitted(Module_request &) override;
|
||||
|
||||
bool _request_complete() override { return _state == REQ_COMPLETE; }
|
||||
|
||||
void _mark_req_successful(bool &);
|
||||
|
||||
void _mark_req_failed(bool &, char const *);
|
||||
|
||||
void _access_vba(Virtual_block_device_request::Type, bool &);
|
||||
|
||||
void _generate_vbd_req(Virtual_block_device_request::Type, State_uint, bool &, Key_id, Virtual_block_address);
|
||||
|
||||
template <typename REQUEST, typename... ARGS>
|
||||
void _generate_req(State_uint complete_state, bool &progress, ARGS &&... args)
|
||||
{
|
||||
enum Type { READ, WRITE, SYNC };
|
||||
generate_req<REQUEST>(complete_state, progress, args..., _gen_req_success);
|
||||
if (_state == SECURE_SB)
|
||||
_secure_sb_state = SECURE_SB_REQ_GENERATED;
|
||||
else
|
||||
_state = REQ_GENERATED;
|
||||
}
|
||||
|
||||
Type op { READ };
|
||||
bool succ { false };
|
||||
Tag_type tg { };
|
||||
uint64_t blk_nr { 0 };
|
||||
uint64_t idx { 0 };
|
||||
};
|
||||
void _start_secure_sb(bool &);
|
||||
|
||||
State _state { SUBMITTED };
|
||||
Superblock_control_request _request { };
|
||||
Generated_prim _generated_prim { };
|
||||
Key _key_plaintext { };
|
||||
Superblock _sb_ciphertext { };
|
||||
Block _encoded_blk { };
|
||||
Superblock_index _sb_idx { 0 };
|
||||
bool _sb_found { false };
|
||||
Superblock_index _read_sb_idx { 0 };
|
||||
Generation _generation { 0 };
|
||||
Snapshots _snapshots { };
|
||||
Hash _hash { };
|
||||
Key _curr_key_plaintext { };
|
||||
Key _prev_key_plaintext { };
|
||||
Physical_block_address _pba { 0 };
|
||||
Number_of_blocks _nr_of_leaves { 0 };
|
||||
Type_1_node _ft_root { };
|
||||
Tree_level_index _ft_max_lvl { 0 };
|
||||
Number_of_leaves _ft_nr_of_leaves { 0 };
|
||||
void _secure_sb(bool &);
|
||||
|
||||
void _tree_ext_step(Superblock::State, bool, String<4>, bool &);
|
||||
|
||||
void _rekey_vba(bool &);
|
||||
|
||||
void _init_rekeying(bool &);
|
||||
|
||||
void _discard_snap(bool &);
|
||||
|
||||
void _create_snap(bool &);
|
||||
|
||||
void _sync(bool &);
|
||||
|
||||
void _initialize(bool &);
|
||||
|
||||
void _deinitialize(bool &);
|
||||
|
||||
public:
|
||||
|
||||
Superblock_control_request const &request() const { return _request; }
|
||||
void execute(bool &);
|
||||
|
||||
Superblock_control_channel(Module_channel_id, Superblock &, Superblock_index &, Generation &);
|
||||
};
|
||||
|
||||
class Tresor::Superblock_control : public Module
|
||||
{
|
||||
private:
|
||||
|
||||
using Request = Superblock_control_request;
|
||||
using Channel = Superblock_control_channel;
|
||||
using Generated_prim = Channel::Generated_prim;
|
||||
using Tag = Channel::Tag_type;
|
||||
|
||||
enum { NR_OF_CHANNELS = 1 };
|
||||
enum { NUM_CHANNELS = 1 };
|
||||
|
||||
Superblock _sb { };
|
||||
Superblock_index _sb_idx { 0 };
|
||||
Generation _curr_gen { 0 };
|
||||
Channel _channels[NR_OF_CHANNELS] { };
|
||||
|
||||
void _mark_req_failed(Channel &chan,
|
||||
bool &progress,
|
||||
char const *str);
|
||||
|
||||
void _mark_req_successful(Channel &chan,
|
||||
bool &progress);
|
||||
|
||||
static char const *_state_to_step_label(Channel::State state);
|
||||
|
||||
bool _handle_failed_generated_req(Channel &chan,
|
||||
bool &progress);
|
||||
|
||||
void _secure_sb_init(Channel &chan,
|
||||
uint64_t chan_idx,
|
||||
bool &progress);
|
||||
|
||||
void _secure_sb_encr_curr_key_compl(Channel &chan,
|
||||
uint64_t chan_idx,
|
||||
bool &progress);
|
||||
|
||||
void _secure_sb_encr_prev_key_compl(Channel &chan,
|
||||
uint64_t chan_idx,
|
||||
bool &progress);
|
||||
|
||||
void _secure_sb_sync_cache_compl(Channel &chan,
|
||||
uint64_t chan_idx,
|
||||
bool &progress);
|
||||
|
||||
void _secure_sb_write_sb_compl(Channel &chan,
|
||||
uint64_t chan_idx,
|
||||
bool &progress);
|
||||
|
||||
void _secure_sb_sync_blk_io_compl(Channel &chan,
|
||||
uint64_t chan_idx,
|
||||
bool &progress);
|
||||
|
||||
bool _secure_sb_finish(Channel &chan,
|
||||
bool &progress);
|
||||
|
||||
void _init_sb_without_key_values(Superblock const &, Superblock &);
|
||||
|
||||
void _execute_sync(Channel &, uint64_t const job_idx, Superblock &,
|
||||
Superblock_index &, Generation &, bool &progress);
|
||||
|
||||
void _execute_create_snap(Channel &, uint64_t, bool &progress);
|
||||
|
||||
void _execute_discard_snap(Channel &, uint64_t, bool &progress);
|
||||
|
||||
void _execute_tree_ext_step(Channel &chan,
|
||||
uint64_t chan_idx,
|
||||
Superblock::State tree_ext_sb_state,
|
||||
bool tree_ext_verbose,
|
||||
Tag tree_ext_tag,
|
||||
Channel::State tree_ext_pending_state,
|
||||
String<4> tree_name,
|
||||
bool &progress);
|
||||
|
||||
void _execute_rekey_vba(Channel &chan,
|
||||
uint64_t chan_idx,
|
||||
bool &progress);
|
||||
|
||||
void _execute_initialize_rekeying(Channel &chan,
|
||||
uint64_t chan_idx,
|
||||
bool &progress);
|
||||
|
||||
void _execute_read_vba(Channel &, uint64_t const job_idx,
|
||||
Superblock const &, bool &progress);
|
||||
|
||||
void _execute_write_vba(Channel &, uint64_t const job_idx,
|
||||
Superblock &, Generation const &, bool &progress);
|
||||
|
||||
void _execute_initialize(Channel &, uint64_t const job_idx,
|
||||
Superblock &, Superblock_index &,
|
||||
Generation &, bool &progress);
|
||||
|
||||
void _execute_deinitialize(Channel &, uint64_t const job_idx,
|
||||
Superblock &, Superblock_index &,
|
||||
Generation &, bool &progress);
|
||||
|
||||
|
||||
/************
|
||||
** Module **
|
||||
************/
|
||||
|
||||
bool ready_to_submit_request() override;
|
||||
|
||||
void submit_request(Module_request &req) override;
|
||||
|
||||
bool _peek_completed_request(uint8_t *buf_ptr,
|
||||
size_t buf_size) override;
|
||||
|
||||
void _drop_completed_request(Module_request &req) override;
|
||||
Superblock _sb { };
|
||||
Superblock_index _sb_idx { INVALID_SB_IDX };
|
||||
Generation _curr_gen { INVALID_GENERATION };
|
||||
Constructible<Channel> _channels[NUM_CHANNELS] { };
|
||||
|
||||
void execute(bool &) override;
|
||||
|
||||
bool _peek_generated_request(uint8_t *buf_ptr,
|
||||
size_t buf_size) override;
|
||||
|
||||
void _drop_generated_request(Module_request &mod_req) override;
|
||||
|
||||
void generated_request_complete(Module_request &req) override;
|
||||
|
||||
public:
|
||||
|
||||
Virtual_block_address max_vba() const;
|
||||
Virtual_block_address max_vba() const { return _sb.valid() ? _sb.max_vba() : 0; };
|
||||
|
||||
Virtual_block_address resizing_nr_of_pbas() const;
|
||||
Virtual_block_address resizing_nr_of_pbas() const { return _sb.resizing_nr_of_pbas; }
|
||||
|
||||
Virtual_block_address rekeying_vba() const;
|
||||
Virtual_block_address rekeying_vba() const { return _sb.rekeying_vba; }
|
||||
|
||||
void snapshot_generations(Snapshot_generations &generations) const
|
||||
{
|
||||
if (_sb.valid()) {
|
||||
Snapshots_info snapshots_info() const;
|
||||
|
||||
for (Snapshot_index idx { 0 };
|
||||
idx < MAX_NR_OF_SNAPSHOTS;
|
||||
idx++) {
|
||||
Superblock_info sb_info() const;
|
||||
|
||||
Snapshot const &snap { _sb.snapshots.items[idx] };
|
||||
if (snap.valid && snap.keep)
|
||||
generations.items[idx] = snap.gen;
|
||||
else
|
||||
generations.items[idx] = INVALID_GENERATION;
|
||||
}
|
||||
} else {
|
||||
|
||||
generations = Snapshot_generations { };
|
||||
}
|
||||
}
|
||||
|
||||
Superblock_info sb_info() const
|
||||
{
|
||||
if (_sb.valid())
|
||||
|
||||
return Superblock_info {
|
||||
true, _sb.state == Superblock::REKEYING,
|
||||
_sb.state == Superblock::EXTENDING_FT,
|
||||
_sb.state == Superblock::EXTENDING_VBD };
|
||||
|
||||
else
|
||||
|
||||
return Superblock_info { };
|
||||
}
|
||||
Superblock_control();
|
||||
};
|
||||
|
||||
#endif /* _TRESOR__SUPERBLOCK_CONTROL_H_ */
|
||||
|
@ -16,8 +16,7 @@
|
||||
|
||||
/* tresor includes */
|
||||
#include <tresor/types.h>
|
||||
#include <tresor/module.h>
|
||||
#include <tresor/vfs_utilities.h>
|
||||
#include <tresor/file.h>
|
||||
|
||||
namespace Tresor {
|
||||
|
||||
@ -28,73 +27,78 @@ namespace Tresor {
|
||||
|
||||
class Tresor::Trust_anchor_request : public Module_request
|
||||
{
|
||||
friend class Trust_anchor_channel;
|
||||
|
||||
public:
|
||||
|
||||
enum Type {
|
||||
INVALID = 0, CREATE_KEY = 1, ENCRYPT_KEY = 2, DECRYPT_KEY = 3,
|
||||
SECURE_SUPERBLOCK = 4, GET_LAST_SB_HASH = 5, INITIALIZE = 6 };
|
||||
enum Type { CREATE_KEY, ENCRYPT_KEY, DECRYPT_KEY, WRITE_HASH, READ_HASH, INITIALIZE };
|
||||
|
||||
private:
|
||||
|
||||
friend class Trust_anchor;
|
||||
friend class Trust_anchor_channel;
|
||||
Type const _type;
|
||||
Key_value &_key_plaintext;
|
||||
Key_value &_key_ciphertext;
|
||||
Hash &_hash;
|
||||
Passphrase const _pass;
|
||||
bool &_success;
|
||||
|
||||
Type _type { INVALID };
|
||||
uint8_t _key_plaintext[KEY_SIZE] { 0 };
|
||||
uint8_t _key_ciphertext[KEY_SIZE] { 0 };
|
||||
Hash _hash { };
|
||||
addr_t _passphrase_ptr { 0 };
|
||||
bool _success { false };
|
||||
NONCOPYABLE(Trust_anchor_request);
|
||||
|
||||
public:
|
||||
|
||||
Trust_anchor_request() { }
|
||||
Trust_anchor_request(Module_id src, Module_channel_id, Type, Key_value &, Key_value &, Hash &, Passphrase, bool &);
|
||||
|
||||
Trust_anchor_request(Module_id src_module_id,
|
||||
Module_request_id src_request_id);
|
||||
|
||||
static void create(void *buf_ptr,
|
||||
size_t buf_size,
|
||||
uint64_t src_module_id,
|
||||
uint64_t src_request_id,
|
||||
size_t req_type,
|
||||
void *key_plaintext_ptr,
|
||||
void *key_ciphertext_ptr,
|
||||
char const *passphrase_ptr,
|
||||
void *hash_ptr);
|
||||
|
||||
void *hash_ptr() { return (void *)&_hash; }
|
||||
void *key_plaintext_ptr() { return (void *)&_key_plaintext; }
|
||||
void *key_ciphertext_ptr() { return (void *)&_key_ciphertext; }
|
||||
|
||||
Type type() const { return _type; }
|
||||
|
||||
bool success() const { return _success; }
|
||||
|
||||
static char const *type_to_string(Type type);
|
||||
|
||||
|
||||
/********************
|
||||
** Module_request **
|
||||
********************/
|
||||
static char const *type_to_string(Type);
|
||||
|
||||
void print(Output &out) const override { Genode::print(out, type_to_string(_type)); }
|
||||
};
|
||||
|
||||
class Tresor::Trust_anchor_channel
|
||||
class Tresor::Trust_anchor_channel : public Module_channel
|
||||
{
|
||||
private:
|
||||
|
||||
friend class Trust_anchor;
|
||||
using Request = Trust_anchor_request;
|
||||
|
||||
enum State {
|
||||
INACTIVE, SUBMITTED, WRITE_PENDING, WRITE_IN_PROGRESS,
|
||||
READ_PENDING, READ_IN_PROGRESS, COMPLETE };
|
||||
enum State { REQ_SUBMITTED, REQ_COMPLETE, READ_OK, WRITE_OK, FILE_ERR };
|
||||
|
||||
State _state { INACTIVE };
|
||||
Trust_anchor_request _request { };
|
||||
Vfs::file_offset _file_offset { 0 };
|
||||
size_t _file_size { 0 };
|
||||
State _state { REQ_COMPLETE };
|
||||
Vfs::Env &_vfs_env;
|
||||
char _result_buf[3];
|
||||
Tresor::Path const _path;
|
||||
Read_write_file<State> _decrypt_file { _state, _vfs_env, { _path, "/decrypt" } };
|
||||
Read_write_file<State> _encrypt_file { _state, _vfs_env, { _path, "/encrypt" } };
|
||||
Read_write_file<State> _generate_key_file { _state, _vfs_env, { _path, "/generate_key" } };
|
||||
Read_write_file<State> _initialize_file { _state, _vfs_env, { _path, "/initialize" } };
|
||||
Read_write_file<State> _hashsum_file { _state, _vfs_env, { _path, "/hashsum" } };
|
||||
Trust_anchor_request *_req_ptr { nullptr };
|
||||
|
||||
NONCOPYABLE(Trust_anchor_channel);
|
||||
|
||||
void _request_submitted(Module_request &) override;
|
||||
|
||||
bool _request_complete() override { return _state == REQ_COMPLETE; }
|
||||
|
||||
void _create_key(bool &);
|
||||
|
||||
void _read_hash(bool &);
|
||||
|
||||
void _initialize(bool &);
|
||||
|
||||
void _write_hash(bool &);
|
||||
|
||||
void _encrypt_key(bool &);
|
||||
|
||||
void _decrypt_key(bool &);
|
||||
|
||||
void _mark_req_failed(bool &, Error_string);
|
||||
|
||||
void _mark_req_successful(bool &);
|
||||
|
||||
public:
|
||||
|
||||
void execute(bool &);
|
||||
|
||||
Trust_anchor_channel(Module_channel_id, Vfs::Env &, Xml_node const &);
|
||||
};
|
||||
|
||||
class Tresor::Trust_anchor : public Module
|
||||
@ -103,71 +107,50 @@ class Tresor::Trust_anchor : public Module
|
||||
|
||||
using Request = Trust_anchor_request;
|
||||
using Channel = Trust_anchor_channel;
|
||||
using Read_result = Vfs::File_io_service::Read_result;
|
||||
using Write_result = Vfs::File_io_service::Write_result;
|
||||
|
||||
enum { NR_OF_CHANNELS = 1 };
|
||||
Constructible<Channel> _channels[1] { };
|
||||
|
||||
Vfs::Env &_vfs_env;
|
||||
char _read_buf[64];
|
||||
String<128> const _path;
|
||||
String<128> const _decrypt_path { _path, "/decrypt" };
|
||||
Vfs::Vfs_handle &_decrypt_file { vfs_open_rw(_vfs_env, { _decrypt_path }) };
|
||||
String<128> const _encrypt_path { _path, "/encrypt" };
|
||||
Vfs::Vfs_handle &_encrypt_file { vfs_open_rw(_vfs_env, { _encrypt_path }) };
|
||||
String<128> const _generate_key_path { _path, "/generate_key" };
|
||||
Vfs::Vfs_handle &_generate_key_file { vfs_open_rw(_vfs_env, { _generate_key_path }) };
|
||||
String<128> const _initialize_path { _path, "/initialize" };
|
||||
Vfs::Vfs_handle &_initialize_file { vfs_open_rw(_vfs_env, { _initialize_path }) };
|
||||
String<128> const _hashsum_path { _path, "/hashsum" };
|
||||
Vfs::Vfs_handle &_hashsum_file { vfs_open_rw(_vfs_env, { _hashsum_path }) };
|
||||
Channel _channels[NR_OF_CHANNELS] { };
|
||||
|
||||
void
|
||||
_execute_write_read_operation(Vfs::Vfs_handle &file,
|
||||
String<128> const &file_path,
|
||||
Channel &channel,
|
||||
char const *write_buf,
|
||||
char *read_buf,
|
||||
size_t read_size,
|
||||
bool &progress);
|
||||
|
||||
void _execute_write_operation(Vfs::Vfs_handle &file,
|
||||
String<128> const &file_path,
|
||||
Channel &channel,
|
||||
char const *write_buf,
|
||||
bool &progress,
|
||||
bool result_via_read);
|
||||
|
||||
void _execute_read_operation(Vfs::Vfs_handle &file,
|
||||
String<128> const &file_path,
|
||||
Channel &channel,
|
||||
char *read_buf,
|
||||
bool &progress);
|
||||
|
||||
|
||||
/************
|
||||
** Module **
|
||||
************/
|
||||
|
||||
bool _peek_completed_request(uint8_t *buf_ptr,
|
||||
size_t buf_size) override;
|
||||
|
||||
void _drop_completed_request(Module_request &req) override;
|
||||
NONCOPYABLE(Trust_anchor);
|
||||
|
||||
public:
|
||||
|
||||
Trust_anchor(Vfs::Env &vfs_env,
|
||||
Xml_node const &xml_node);
|
||||
struct Create_key : Request
|
||||
{
|
||||
Create_key(Module_id m, Module_channel_id c, Key_value &k, bool &s)
|
||||
: Request(m, c, Request::CREATE_KEY, k, *(Key_value*)0, *(Hash*)0, Passphrase(), s) { }
|
||||
};
|
||||
|
||||
struct Encrypt_key : Request
|
||||
{
|
||||
Encrypt_key(Module_id m, Module_channel_id c, Key_value const &kp, Key_value &kc, bool &s)
|
||||
: Request(m, c, Request::ENCRYPT_KEY, *const_cast<Key_value*>(&kp), kc, *(Hash*)0, Passphrase(), s) { }
|
||||
};
|
||||
|
||||
/************
|
||||
** Module **
|
||||
************/
|
||||
struct Decrypt_key : Request
|
||||
{
|
||||
Decrypt_key(Module_id m, Module_channel_id c, Key_value &kp, Key_value const &kc, bool &s)
|
||||
: Request(m, c, Request::DECRYPT_KEY, kp, *const_cast<Key_value*>(&kc), *(Hash*)0, Passphrase(), s) { }
|
||||
};
|
||||
|
||||
bool ready_to_submit_request() override;
|
||||
struct Write_hash : Request
|
||||
{
|
||||
Write_hash(Module_id m, Module_channel_id c, Hash const &h, bool &s)
|
||||
: Request(m, c, Request::WRITE_HASH, *(Key_value*)0, *(Key_value*)0, *const_cast<Hash*>(&h), Passphrase(), s) { }
|
||||
};
|
||||
|
||||
void submit_request(Module_request &req) override;
|
||||
struct Read_hash : Request
|
||||
{
|
||||
Read_hash(Module_id m, Module_channel_id c, Hash &h, bool &s)
|
||||
: Request(m, c, Request::READ_HASH, *(Key_value*)0, *(Key_value*)0, h, Passphrase(), s) { }
|
||||
};
|
||||
|
||||
struct Initialize : Request
|
||||
{
|
||||
Initialize(Module_id src_mod, Module_channel_id src_chan, Passphrase pass, bool &succ)
|
||||
: Request(src_mod, src_chan, Request::INITIALIZE, *(Key_value*)0, *(Key_value*)0, *(Hash*)0, pass, succ) { }
|
||||
};
|
||||
|
||||
Trust_anchor(Vfs::Env &, Xml_node const &);
|
||||
|
||||
void execute(bool &) override;
|
||||
};
|
||||
|
@ -16,16 +16,14 @@
|
||||
#define _TRESOR__TYPES_H_
|
||||
|
||||
/* base includes */
|
||||
#include <base/output.h>
|
||||
#include <util/string.h>
|
||||
#include <util/reconstructible.h>
|
||||
|
||||
/* os includes */
|
||||
#include <util/formatted_output.h>
|
||||
|
||||
/* tresor includes */
|
||||
#include <tresor/verbosity.h>
|
||||
#include <tresor/math.h>
|
||||
#include <tresor/assertion.h>
|
||||
#include <tresor/module.h>
|
||||
|
||||
namespace Tresor {
|
||||
|
||||
@ -46,9 +44,15 @@ namespace Tresor {
|
||||
using Snapshot_index = uint32_t;
|
||||
using Superblock_index = uint8_t;
|
||||
using On_disc_bool = uint8_t;
|
||||
using Request_offset = uint64_t;
|
||||
using Request_tag = uint64_t;
|
||||
using Passphrase = String<64>;
|
||||
using Error_string = String<128>;
|
||||
|
||||
enum { BLOCK_SIZE = 4096 };
|
||||
enum { INVALID_KEY_ID = 0 };
|
||||
enum { INVALID_REQ_TAG = 0xffff'ffff };
|
||||
enum { INVALID_SB_IDX = 0xff };
|
||||
enum { INVALID_GENERATION = 0 };
|
||||
enum { INITIAL_GENERATION = 0 };
|
||||
enum { MAX_PBA = 0xffff'ffff'ffff'ffff };
|
||||
@ -57,18 +61,12 @@ namespace Tresor {
|
||||
enum { MAX_GENERATION = 0xffff'ffff'ffff'ffff };
|
||||
enum { MAX_SNAP_ID = 0xffff'ffff };
|
||||
enum { HASH_SIZE = 32 };
|
||||
enum { T1_NODE_STORAGE_SIZE = 64 };
|
||||
enum { T2_NODE_STORAGE_SIZE = 64 };
|
||||
enum { NR_OF_T2_NODES_PER_BLK = (size_t)BLOCK_SIZE / (size_t)T2_NODE_STORAGE_SIZE };
|
||||
enum { NR_OF_T1_NODES_PER_BLK = (size_t)BLOCK_SIZE / (size_t)T1_NODE_STORAGE_SIZE };
|
||||
enum { ON_DISC_NODE_SIZE = 64 };
|
||||
enum { NUM_NODES_PER_BLK = (size_t)BLOCK_SIZE / (size_t)ON_DISC_NODE_SIZE };
|
||||
enum { TREE_MAX_DEGREE_LOG_2 = 6 };
|
||||
enum { TREE_MAX_DEGREE = 1 << TREE_MAX_DEGREE_LOG_2 };
|
||||
enum { TREE_MAX_LEVEL = 6 };
|
||||
enum { TREE_MAX_NR_OF_LEVELS = TREE_MAX_LEVEL + 1 };
|
||||
enum { T2_NODE_LVL = 1 };
|
||||
enum { VBD_LOWEST_T1_LVL = 1 };
|
||||
enum { FT_LOWEST_T1_LVL = 2 };
|
||||
enum { MT_LOWEST_T1_LVL = 2 };
|
||||
enum { KEY_SIZE = 32 };
|
||||
enum { MAX_NR_OF_SNAPSHOTS = 48 };
|
||||
enum { MAX_SNAP_IDX = MAX_NR_OF_SNAPSHOTS - 1 };
|
||||
@ -91,15 +89,19 @@ namespace Tresor {
|
||||
struct Superblock;
|
||||
struct Superblock_info;
|
||||
struct Snapshot;
|
||||
struct Snapshot_generations;
|
||||
struct Snapshots_info;
|
||||
struct Snapshots;
|
||||
struct Type_1_node;
|
||||
struct Type_1_node_block;
|
||||
struct Type_1_node_walk;
|
||||
struct Type_1_node_block_walk;
|
||||
struct Type_2_node;
|
||||
struct Type_2_node_block;
|
||||
struct Tree_walk_pbas;
|
||||
struct Tree_walk_generations;
|
||||
struct Level_indent;
|
||||
struct Tree_root;
|
||||
class Pba_allocator;
|
||||
|
||||
template <size_t LEN>
|
||||
class Fixed_length;
|
||||
@ -114,24 +116,16 @@ namespace Tresor {
|
||||
return to_the_power_of<Virtual_block_address>(degree, max_lvl) - 1;
|
||||
}
|
||||
|
||||
inline Physical_block_address
|
||||
alloc_pba_from_resizing_contingent(Physical_block_address &first_pba,
|
||||
Number_of_blocks &nr_of_pbas)
|
||||
inline Physical_block_address alloc_pba_from_range(Physical_block_address &first_pba, Number_of_blocks &num_pbas)
|
||||
{
|
||||
if (nr_of_pbas == 0) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
Physical_block_address const allocated_pba { first_pba };
|
||||
first_pba = first_pba + 1;
|
||||
nr_of_pbas = nr_of_pbas - 1;
|
||||
return allocated_pba;
|
||||
ASSERT(num_pbas);
|
||||
first_pba++;
|
||||
num_pbas--;
|
||||
return first_pba - 1;
|
||||
}
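/*
 * Illustration (not part of the patch): the helper consumes a contingent
 * [first_pba, first_pba + num_pbas) from the front. A minimal sketch with
 * made-up values:
 *
 *   Physical_block_address first { 1000 };
 *   Number_of_blocks       num   { 3 };
 *   alloc_pba_from_range(first, num);   // returns 1000; first == 1001, num == 2
 *   alloc_pba_from_range(first, num);   // returns 1001; first == 1002, num == 1
 */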
|
||||
|
||||
inline Tree_node_index
|
||||
t1_child_idx_for_vba_typed(Virtual_block_address vba,
|
||||
Tree_level_index lvl,
|
||||
Tree_degree degr)
|
||||
t1_node_idx_for_vba_typed(Virtual_block_address vba, Tree_level_index lvl, Tree_degree degr)
|
||||
{
|
||||
uint64_t const degr_log_2 { log2(degr) };
|
||||
uint64_t const degr_mask { ((uint64_t)1 << degr_log_2) - 1 };
|
||||
@ -140,59 +134,89 @@ namespace Tresor {
|
||||
}
|
||||
|
||||
template <typename T1, typename T2, typename T3>
|
||||
inline Tree_node_index t1_child_idx_for_vba(T1 vba,
|
||||
T2 lvl,
|
||||
T3 degr)
|
||||
inline Tree_node_index t1_node_idx_for_vba(T1 vba, T2 lvl, T3 degr)
|
||||
{
|
||||
return t1_child_idx_for_vba_typed((Virtual_block_address)vba,
|
||||
(Tree_level_index)lvl,
|
||||
(Tree_degree)degr);
|
||||
return t1_node_idx_for_vba_typed((Virtual_block_address)vba, (Tree_level_index)lvl, (Tree_degree)degr);
|
||||
}
|
||||
|
||||
inline Tree_node_index t2_child_idx_for_vba(Virtual_block_address vba,
|
||||
Tree_degree degr)
|
||||
inline Tree_node_index t2_node_idx_for_vba(Virtual_block_address vba, Tree_degree degr)
|
||||
{
|
||||
uint64_t const degr_log_2 { log2(degr) };
|
||||
uint64_t const degr_mask { ((uint64_t)1 << degr_log_2) - 1 };
|
||||
return (Tree_node_index)((uint64_t)vba & degr_mask);
|
||||
}
|
||||
|
||||
inline Virtual_block_address vbd_node_min_vba(Tree_degree_log_2 vbd_degr_log_2,
Tree_level_index vbd_lvl,
Virtual_block_address vbd_leaf_vba)
{
return vbd_leaf_vba & (~(Physical_block_address)0 << ((Physical_block_address)vbd_degr_log_2 * vbd_lvl));
}

inline Number_of_blocks vbd_node_num_vbas(Tree_degree_log_2 vbd_degr_log_2, Tree_level_index vbd_lvl)
{
return (Number_of_blocks)1 << ((Number_of_blocks)vbd_degr_log_2 * vbd_lvl);
}

inline Virtual_block_address vbd_node_max_vba(Tree_degree_log_2 vbd_degr_log_2,
Tree_level_index vbd_lvl,
Virtual_block_address vbd_leaf_vba)
{
return vbd_node_num_vbas(vbd_degr_log_2, vbd_lvl) - 1 + vbd_node_min_vba(vbd_degr_log_2, vbd_lvl, vbd_leaf_vba);
}
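/*
 * Illustration (not part of the patch): for a degree-64 tree (degree log2
 * of 6), the level-2 inner node on the path of leaf VBA 5000 covers the
 * VBA range [4096..8191]:
 *
 *   Tree_degree_log_2 const log2_degr { 6 };
 *   vbd_node_num_vbas(log2_degr, 2);        // 1 << 12 == 4096 leaf VBAs
 *   vbd_node_min_vba (log2_degr, 2, 5000);  // 5000 & ~0xfff == 4096
 *   vbd_node_max_vba (log2_degr, 2, 5000);  // 4096 + 4096 - 1 == 8191
 */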
|
||||
}
|
||||
|
||||
|
||||
class Tresor::Pba_allocator
{
private:

Physical_block_address const _first_pba;
Number_of_blocks _num_used_pbas { 0 };

public:

Pba_allocator(Physical_block_address const first_pba) : _first_pba { first_pba } { }

Number_of_blocks num_used_pbas() { return _num_used_pbas; }

Physical_block_address first_pba() { return _first_pba; }

bool alloc(Physical_block_address &pba)
{
if (_num_used_pbas > MAX_PBA - _first_pba)
return false;

pba = _first_pba + _num_used_pbas;
_num_used_pbas++;
return true;
}
};
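/*
 * Illustration (not part of the patch): a minimal usage sketch of this
 * allocator, with made-up values:
 *
 *   Pba_allocator pba_alloc { 128 };   // range starts at PBA 128
 *   Physical_block_address pba { 0 };
 *   if (pba_alloc.alloc(pba)) {
 *       // pba == 128; the next successful call yields 129, and so on
 *   } else {
 *       // allocation would exceed MAX_PBA
 *   }
 */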
|
||||
|
||||
|
||||
struct Tresor::Byte_range
|
||||
{
|
||||
uint8_t const *ptr;
|
||||
size_t size;
|
||||
size_t size;
|
||||
|
||||
void print(Output &out) const
|
||||
{
|
||||
using Genode::print;
|
||||
|
||||
enum { MAX_BYTES_PER_LINE = 64 };
|
||||
enum { MAX_BYTES_PER_WORD = 4 };
|
||||
|
||||
if (size > 0xffff) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
ASSERT(size <= 0xffff);
|
||||
if (size > MAX_BYTES_PER_LINE) {
|
||||
|
||||
for (size_t idx { 0 }; idx < size; idx++) {
|
||||
|
||||
if (idx % MAX_BYTES_PER_LINE == 0)
|
||||
print(out, "\n ",
|
||||
Hex((uint16_t)idx, Hex::PREFIX, Hex::PAD), ": ");
|
||||
print(out, "\n ", Hex((uint16_t)idx, Hex::PREFIX, Hex::PAD), ": ");
|
||||
|
||||
else if (idx % MAX_BYTES_PER_WORD == 0)
|
||||
print(out, " ");
|
||||
|
||||
print(out, Hex(ptr[idx], Hex::OMIT_PREFIX, Hex::PAD));
|
||||
}
|
||||
|
||||
} else {
|
||||
|
||||
for (size_t idx { 0 }; idx < size; idx++) {
|
||||
|
||||
if (idx % MAX_BYTES_PER_WORD == 0 && idx != 0)
|
||||
print(out, " ");
|
||||
|
||||
@ -450,6 +474,11 @@ struct Tresor::Type_1_node
|
||||
hash != node.hash;
|
||||
}
|
||||
|
||||
bool is_volatile(Generation curr_gen) const
|
||||
{
|
||||
return gen == INITIAL_GENERATION || gen == curr_gen;
|
||||
}
|
||||
|
||||
void print(Output &out) const
|
||||
{
|
||||
Genode::print(out, "pba ", pba, " gen ", gen, " hash ", hash);
|
||||
@ -457,9 +486,26 @@ struct Tresor::Type_1_node
|
||||
};
|
||||
|
||||
|
||||
struct Tresor::Tree_root
|
||||
{
|
||||
Physical_block_address &pba;
|
||||
Generation &gen;
|
||||
Hash &hash;
|
||||
Tree_level_index &max_lvl;
|
||||
Tree_degree °ree;
|
||||
Number_of_leaves &num_leaves;
|
||||
|
||||
Type_1_node t1_node() const { return { pba, gen, hash }; }
|
||||
|
||||
void t1_node(Type_1_node const &node) { pba = node.pba; gen = node.gen; hash = node.hash; }
|
||||
|
||||
void print(Output &out) const { Genode::print(out, t1_node(), " maxlvl ", max_lvl, " degr ", degree, " leaves ", num_leaves); }
|
||||
};
|
||||
|
||||
|
||||
struct Tresor::Type_1_node_block
|
||||
{
|
||||
Type_1_node nodes[NR_OF_T1_NODES_PER_BLK] { };
|
||||
Type_1_node nodes[NUM_NODES_PER_BLK] { };
|
||||
|
||||
void decode_from_blk(Block const &blk)
|
||||
{
|
||||
@ -477,6 +523,12 @@ struct Tresor::Type_1_node_block
|
||||
};
|
||||
|
||||
|
||||
struct Tresor::Type_1_node_block_walk
|
||||
{
|
||||
Type_1_node_block items[TREE_MAX_NR_OF_LEVELS] { };
|
||||
};
|
||||
|
||||
|
||||
struct Tresor::Type_2_node
|
||||
{
|
||||
Physical_block_address pba { 0 };
|
||||
@ -531,7 +583,7 @@ struct Tresor::Type_2_node
|
||||
|
||||
struct Tresor::Type_2_node_block
|
||||
{
|
||||
Type_2_node nodes[NR_OF_T2_NODES_PER_BLK] { };
|
||||
Type_2_node nodes[NUM_NODES_PER_BLK] { };
|
||||
|
||||
void decode_from_blk(Block const &blk)
|
||||
{
|
||||
@ -610,7 +662,7 @@ struct Tresor::Snapshots
|
||||
|
||||
void print(Output &out) const
|
||||
{
|
||||
bool first { false };
|
||||
bool first { true };
|
||||
for (Snapshot_index idx { 0 }; idx < MAX_NR_OF_SNAPSHOTS; idx++) {
|
||||
|
||||
if (!items[idx].valid)
|
||||
@ -647,31 +699,27 @@ struct Tresor::Snapshots
|
||||
}
|
||||
}
|
||||
|
||||
Snapshot_index newest_snapshot_idx() const
|
||||
Snapshot_index newest_snap_idx() const
|
||||
{
|
||||
Snapshot_index result { INVALID_SNAP_IDX };
|
||||
for (Snapshot_index idx { 0 }; idx < MAX_NR_OF_SNAPSHOTS; idx ++) {
|
||||
|
||||
Snapshot const &snap { items[idx] };
|
||||
if (!snap.valid)
|
||||
if (!items[idx].valid)
|
||||
continue;
|
||||
|
||||
if (result != INVALID_SNAP_IDX &&
|
||||
snap.gen <= items[result].gen)
|
||||
if (result != INVALID_SNAP_IDX && items[idx].gen <= items[result].gen)
|
||||
continue;
|
||||
|
||||
result = idx;
|
||||
}
|
||||
if (result != INVALID_SNAP_IDX)
|
||||
return result;
|
||||
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
ASSERT(result != INVALID_SNAP_IDX);
|
||||
return result;
|
||||
}
|
||||
|
||||
Snapshot_index
|
||||
idx_of_invalid_or_lowest_gen_evictable_snap(Generation curr_gen,
|
||||
Generation last_secured_gen) const
|
||||
/**
* Returns the index of an unused slot or, if all are used, of the slot
* that contains the lowest-generation evictable snapshot (no "keep" flag).
*/
Snapshot_index alloc_idx(Generation curr_gen, Generation last_secured_gen) const
|
||||
{
|
||||
Snapshot_index result { INVALID_SNAP_IDX };
|
||||
for (Snapshot_index idx { 0 }; idx < MAX_NR_OF_SNAPSHOTS; idx ++) {
|
||||
@ -691,11 +739,8 @@ struct Tresor::Snapshots
|
||||
|
||||
result = idx;
|
||||
}
|
||||
if (result != INVALID_SNAP_IDX)
|
||||
return result;
|
||||
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
ASSERT(result != INVALID_SNAP_IDX);
|
||||
return result;
|
||||
}
|
||||
};
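/*
 * Illustration (not part of the patch): a hypothetical way a caller could
 * combine the two lookups above when adding a snapshot to a Snapshots
 * object 'snapshots' (curr_gen and last_secured_gen as in the signatures):
 *
 *   Snapshot_index const src { snapshots.newest_snap_idx() };
 *   Snapshot_index const dst { snapshots.alloc_idx(curr_gen, last_secured_gen) };
 *   snapshots.items[dst] = snapshots.items[src];
 *   snapshots.items[dst].gen = curr_gen;
 */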
|
||||
|
||||
@ -714,20 +759,20 @@ struct Tresor::Superblock
|
||||
Key previous_key { }; // offset 25
|
||||
Key current_key { }; // offset 61
|
||||
Snapshots snapshots { }; // offset 97
|
||||
Generation last_secured_generation { }; // offset 3553
|
||||
Snapshot_index curr_snap { }; // offset 3561
|
||||
Generation last_secured_generation { 0 }; // offset 3553
|
||||
Snapshot_index curr_snap_idx { 0 }; // offset 3561
|
||||
Tree_degree degree { TREE_MIN_DEGREE }; // offset 3565
|
||||
Physical_block_address first_pba { 0 }; // offset 3569
|
||||
Number_of_blocks nr_of_pbas { 0 }; // offset 3577
|
||||
Generation free_gen { 0 }; // offset 3585
|
||||
Physical_block_address free_number { 0 }; // offset 3593
|
||||
Hash free_hash { 0 }; // offset 3601
|
||||
Hash free_hash { }; // offset 3601
|
||||
Tree_level_index free_max_level { 0 }; // offset 3633
|
||||
Tree_degree free_degree { TREE_MIN_DEGREE }; // offset 3637
|
||||
Number_of_leaves free_leaves { 0 }; // offset 3641
|
||||
Generation meta_gen { 0 }; // offset 3649
|
||||
Physical_block_address meta_number { 0 }; // offset 3657
|
||||
Hash meta_hash { 0 }; // offset 3665
|
||||
Hash meta_hash { }; // offset 3665
|
||||
Tree_level_index meta_max_level { 0 }; // offset 3697
|
||||
Tree_degree meta_degree { TREE_MIN_DEGREE }; // offset 3701
|
||||
Number_of_leaves meta_leaves { 0 }; // offset 3705
|
||||
@ -770,7 +815,7 @@ struct Tresor::Superblock
|
||||
current_key.decode_from_blk(scanner);
|
||||
snapshots.decode_from_blk(scanner);
|
||||
scanner.fetch(last_secured_generation);
|
||||
scanner.fetch(curr_snap);
|
||||
scanner.fetch(curr_snap_idx);
|
||||
scanner.fetch(degree);
|
||||
scanner.fetch(first_pba);
|
||||
scanner.fetch(nr_of_pbas);
|
||||
@ -800,7 +845,7 @@ struct Tresor::Superblock
|
||||
current_key.encode_to_blk(generator);
|
||||
snapshots.encode_to_blk(generator);
|
||||
generator.append(last_secured_generation);
|
||||
generator.append(curr_snap);
|
||||
generator.append(curr_snap_idx);
|
||||
generator.append(degree);
|
||||
generator.append(first_pba);
|
||||
generator.append(nr_of_pbas);
|
||||
@ -835,7 +880,7 @@ struct Tresor::Superblock
|
||||
{
|
||||
Genode::print(
|
||||
out, "state ", state_to_str(state), " last_secured_gen ",
|
||||
last_secured_generation, " curr_snap ", curr_snap, " degr ",
|
||||
last_secured_generation, " curr_snap ", curr_snap_idx, " degr ",
|
||||
degree, " first_pba ", first_pba, " pbas ", nr_of_pbas,
|
||||
" snapshots");
|
||||
|
||||
@ -843,6 +888,43 @@ struct Tresor::Superblock
|
||||
if (snap.valid)
|
||||
Genode::print(out, " ", snap);
|
||||
}
|
||||
|
||||
Snapshot &curr_snap() { return snapshots.items[curr_snap_idx]; }
|
||||
Snapshot const &curr_snap() const { return snapshots.items[curr_snap_idx]; }
|
||||
|
||||
Virtual_block_address max_vba() const
|
||||
{
|
||||
ASSERT(valid());
|
||||
return curr_snap().nr_of_leaves - 1;
|
||||
}
|
||||
|
||||
void copy_all_but_key_values_from(Superblock const &sb)
|
||||
{
|
||||
state = sb.state;
|
||||
rekeying_vba = sb.rekeying_vba;
|
||||
resizing_nr_of_pbas = sb.resizing_nr_of_pbas;
|
||||
resizing_nr_of_leaves = sb.resizing_nr_of_leaves;
|
||||
first_pba = sb.first_pba;
|
||||
nr_of_pbas = sb.nr_of_pbas;
|
||||
previous_key.id = sb.previous_key.id;
|
||||
current_key.id = sb.current_key.id;
|
||||
snapshots = sb.snapshots;
|
||||
last_secured_generation = sb.last_secured_generation;
|
||||
curr_snap_idx = sb.curr_snap_idx;
|
||||
degree = sb.degree;
|
||||
free_gen = sb.free_gen;
|
||||
free_number = sb.free_number;
|
||||
free_hash = sb.free_hash;
|
||||
free_max_level = sb.free_max_level;
|
||||
free_degree = sb.free_degree;
|
||||
free_leaves = sb.free_leaves;
|
||||
meta_gen = sb.meta_gen;
|
||||
meta_number = sb.meta_number;
|
||||
meta_hash = sb.meta_hash;
|
||||
meta_max_level = sb.meta_max_level;
|
||||
meta_degree = sb.meta_degree;
|
||||
meta_leaves = sb.meta_leaves;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@@ -884,9 +966,21 @@ struct Tresor::Tree_walk_pbas
};


struct Tresor::Snapshot_generations
struct Tresor::Tree_walk_generations
{
Generation items[MAX_NR_OF_SNAPSHOTS] { 0 };
Generation items[TREE_MAX_NR_OF_LEVELS] { };
};


struct Tresor::Snapshots_info
{
Generation generations[MAX_NR_OF_SNAPSHOTS] { };

Snapshots_info()
{
for (Generation &gen : generations)
gen = INVALID_GENERATION;
}
};


@@ -14,12 +14,8 @@
#ifndef _TRESOR__VBD_CHECK_H_
#define _TRESOR__VBD_CHECK_H_

/* base includes */
#include <base/output.h>

/* tresor includes */
#include <tresor/types.h>
#include <tresor/module.h>

namespace Tresor {

@ -31,94 +27,65 @@ namespace Tresor {
|
||||
|
||||
class Tresor::Vbd_check_request : public Module_request
|
||||
{
|
||||
public:
|
||||
|
||||
enum Type { INVALID = 0, CHECK = 1, };
|
||||
friend class Vbd_check_channel;
|
||||
|
||||
private:
|
||||
|
||||
friend class Vbd_check;
|
||||
friend class Vbd_check_channel;
|
||||
Tree_root const &_vbd;
|
||||
bool &_success;
|
||||
|
||||
Type _type { INVALID };
|
||||
Tree_level_index _max_lvl { 0 };
|
||||
Tree_node_index _max_child_idx { 0 };
|
||||
Number_of_leaves _nr_of_leaves { 0 };
|
||||
Type_1_node _root { };
|
||||
bool _success { false };
|
||||
NONCOPYABLE(Vbd_check_request);
|
||||
|
||||
public:
|
||||
|
||||
Vbd_check_request() { }
|
||||
Vbd_check_request(Module_id, Module_channel_id, Tree_root const &, bool &);
|
||||
|
||||
Vbd_check_request(uint64_t src_module_id,
|
||||
uint64_t src_request_id,
|
||||
Type type,
|
||||
Tree_level_index max_lvl,
|
||||
Tree_node_index max_child_idx,
|
||||
Number_of_leaves nr_of_leaves,
|
||||
Type_1_node root);
|
||||
|
||||
Type type() const { return _type; }
|
||||
|
||||
bool success() const { return _success; }
|
||||
|
||||
static char const *type_to_string(Type type);
|
||||
|
||||
|
||||
/********************
|
||||
** Module_request **
|
||||
********************/
|
||||
|
||||
void print(Output &out) const override
|
||||
{
|
||||
Genode::print(out, type_to_string(_type), " root ", _root);
|
||||
}
|
||||
void print(Output &out) const override { Genode::print(out, "check ", _vbd); }
|
||||
};
|
||||
|
||||
|
||||
class Tresor::Vbd_check_channel
|
||||
class Tresor::Vbd_check_channel : public Module_channel
|
||||
{
|
||||
private:
|
||||
|
||||
friend class Vbd_check;
|
||||
|
||||
using Request = Vbd_check_request;
|
||||
|
||||
enum Child_state {
|
||||
READ_BLOCK = 0, CHECK_HASH = 1, DONE = 2 };
|
||||
enum State : State_uint { REQ_SUBMITTED, REQ_IN_PROGRESS, REQ_COMPLETE, REQ_GENERATED, READ_BLK_SUCCEEDED };
|
||||
|
||||
struct Type_1_level
|
||||
State _state { REQ_COMPLETE };
|
||||
Type_1_node_block_walk _t1_blks { };
|
||||
bool _check_node[TREE_MAX_NR_OF_LEVELS][NUM_NODES_PER_BLK] { };
|
||||
Block _blk { };
|
||||
Request *_req_ptr { };
|
||||
Number_of_leaves _num_remaining_leaves { 0 };
|
||||
bool _generated_req_success { false };
|
||||
|
||||
NONCOPYABLE(Vbd_check_channel);
|
||||
|
||||
void _generated_req_completed(State_uint) override;
|
||||
|
||||
void _request_submitted(Module_request &) override;
|
||||
|
||||
bool _request_complete() override { return _state == REQ_COMPLETE; }
|
||||
|
||||
void _mark_req_failed(bool &, Error_string);
|
||||
|
||||
void _mark_req_successful(bool &);
|
||||
|
||||
bool _execute_node(Tree_level_index, Tree_node_index, bool &);
|
||||
|
||||
template <typename REQUEST, typename... ARGS>
|
||||
void _generate_req(State_uint state, bool &progress, ARGS &&... args)
|
||||
{
|
||||
Child_state children_state[NR_OF_T1_NODES_PER_BLK] { };
|
||||
Type_1_node_block children { };
|
||||
_state = REQ_GENERATED;
|
||||
generate_req<REQUEST>(state, progress, args..., _generated_req_success);
|
||||
}
|
||||
|
||||
Type_1_level()
|
||||
{
|
||||
for (Child_state &state : children_state)
|
||||
state = DONE;
|
||||
}
|
||||
};
|
||||
public:
|
||||
|
||||
enum Primitive_tag { INVALID, BLOCK_IO };
|
||||
Vbd_check_channel(Module_channel_id id) : Module_channel { VBD_CHECK, id } { }
|
||||
|
||||
struct Generated_primitive
|
||||
{
|
||||
bool success { false };
|
||||
Primitive_tag tag { INVALID };
|
||||
Physical_block_address blk_nr { 0 };
|
||||
bool dropped { false };
|
||||
|
||||
bool valid() const { return tag != INVALID; }
|
||||
};
|
||||
|
||||
Generated_primitive _gen_prim { };
|
||||
Tree_level_index _lvl_to_read { 0 };
|
||||
Child_state _root_state { DONE };
|
||||
Block _leaf_lvl { };
|
||||
Block _encoded_blk { };
|
||||
Type_1_level _t1_lvls[TREE_MAX_LEVEL] { };
|
||||
Request _request { };
|
||||
void execute(bool &);
|
||||
};
|
||||
|
||||
|
||||
@ -126,72 +93,17 @@ class Tresor::Vbd_check : public Module
|
||||
{
|
||||
private:
|
||||
|
||||
using Request = Vbd_check_request;
|
||||
using Channel = Vbd_check_channel;
|
||||
using Child_state = Vbd_check_channel::Child_state;
|
||||
using Type_1_level = Vbd_check_channel::Type_1_level;
|
||||
|
||||
enum { NR_OF_CHANNELS = 1 };
|
||||
|
||||
Channel _channels[NR_OF_CHANNELS] { };
|
||||
|
||||
void _execute_check(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
void _mark_req_failed(Channel &channel,
|
||||
bool &progress,
|
||||
char const *str);
|
||||
|
||||
void _mark_req_successful(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
void _execute_inner_t1_child(Channel &chan,
|
||||
Type_1_node const &child,
|
||||
Type_1_level &child_lvl,
|
||||
Child_state &child_state,
|
||||
Tree_level_index lvl,
|
||||
Tree_node_index child_idx,
|
||||
bool &progress);
|
||||
|
||||
|
||||
void _execute_leaf_child(Channel &chan,
|
||||
Type_1_node const &child,
|
||||
Block const &child_lvl,
|
||||
Child_state &child_state,
|
||||
Tree_level_index lvl,
|
||||
Tree_node_index child_idx,
|
||||
bool &progress);
|
||||
|
||||
|
||||
/************
|
||||
** Module **
|
||||
************/
|
||||
|
||||
bool _peek_completed_request(uint8_t *buf_ptr,
|
||||
size_t buf_size) override;
|
||||
|
||||
void _drop_completed_request(Module_request &req) override;
|
||||
|
||||
bool _peek_generated_request(uint8_t *buf_ptr,
|
||||
size_t buf_size) override;
|
||||
|
||||
void _drop_generated_request(Module_request &mod_req) override;
|
||||
|
||||
void generated_request_complete(Module_request &req) override;
|
||||
Constructible<Channel> _channels[1] { };
|
||||
|
||||
NONCOPYABLE(Vbd_check);
|
||||
|
||||
public:
|
||||
|
||||
/************
|
||||
** Module **
|
||||
************/
|
||||
|
||||
bool ready_to_submit_request() override;
|
||||
|
||||
void submit_request(Module_request &req) override;
|
||||
Vbd_check();
|
||||
|
||||
void execute(bool &) override;
|
||||
|
||||
};
|
||||
|
||||
#endif /* _TRESOR__VBD_CHECK_H_ */
@@ -1,5 +1,6 @@
/*
* \brief Module for initializing the VBD
* \author Martin Stein
* \author Josef Soentgen
* \date 2023-03-03
*/
@@ -14,11 +15,8 @@
#ifndef _TRESOR__VBD_INITIALIZER_H_
#define _TRESOR__VBD_INITIALIZER_H_

/* base includes */
#include <base/output.h>

/* tresor includes */
#include <tresor/module.h>
#include <tresor/types.h>

namespace Tresor {

@ -30,116 +28,63 @@ namespace Tresor {
|
||||
|
||||
class Tresor::Vbd_initializer_request : public Module_request
|
||||
{
|
||||
public:
|
||||
|
||||
enum Type { INVALID = 0, INIT = 1, };
|
||||
friend class Vbd_initializer_channel;
|
||||
|
||||
private:
|
||||
|
||||
friend class Vbd_initializer;
|
||||
friend class Vbd_initializer_channel;
|
||||
|
||||
Type _type { INVALID };
|
||||
uint8_t _root_node[sizeof(Type_1_node)] { 0 };
|
||||
uint64_t _max_level_idx { 0 };
|
||||
uint64_t _max_child_idx { 0 };
|
||||
uint64_t _nr_of_leaves { 0 };
|
||||
bool _success { false };
|
||||
Tree_root &_vbd;
|
||||
Pba_allocator &_pba_alloc;
|
||||
bool &_success;
|
||||
|
||||
NONCOPYABLE(Vbd_initializer_request);
|
||||
|
||||
public:
|
||||
|
||||
Vbd_initializer_request() { }
|
||||
Vbd_initializer_request(Module_id, Module_channel_id, Tree_root &, Pba_allocator &, bool &);
|
||||
|
||||
Vbd_initializer_request(Module_id src_module_id,
|
||||
Module_request_id src_request_id);
|
||||
|
||||
static void create(void *buf_ptr,
|
||||
size_t buf_size,
|
||||
uint64_t src_module_id,
|
||||
uint64_t src_request_id,
|
||||
size_t req_type,
|
||||
uint64_t max_level_idx,
|
||||
uint64_t max_child_idx,
|
||||
uint64_t nr_of_leaves);
|
||||
|
||||
void *root_node() { return _root_node; }
|
||||
|
||||
Type type() const { return _type; }
|
||||
|
||||
bool success() const { return _success; }
|
||||
|
||||
static char const *type_to_string(Type type);
|
||||
|
||||
|
||||
/********************
|
||||
** Module_request **
|
||||
********************/
|
||||
|
||||
void print(Output &out) const override { Genode::print(out, type_to_string(_type)); }
|
||||
void print(Output &out) const override { Genode::print(out, "init"); }
|
||||
};
|
||||
|
||||
|
||||
class Tresor::Vbd_initializer_channel
|
||||
class Tresor::Vbd_initializer_channel : public Module_channel
|
||||
{
|
||||
private:
|
||||
|
||||
friend class Vbd_initializer;
|
||||
using Request = Vbd_initializer_request;
|
||||
|
||||
enum State {
|
||||
INACTIVE, SUBMITTED, PENDING, IN_PROGRESS, COMPLETE,
|
||||
BLOCK_ALLOC_PENDING,
|
||||
BLOCK_ALLOC_IN_PROGRESS,
|
||||
BLOCK_ALLOC_COMPLETE,
|
||||
BLOCK_IO_PENDING,
|
||||
BLOCK_IO_IN_PROGRESS,
|
||||
BLOCK_IO_COMPLETE,
|
||||
};
|
||||
enum State { REQ_GENERATED, SUBMITTED, COMPLETE, EXECUTE_NODES };
|
||||
|
||||
enum Child_state { DONE, INIT_BLOCK, INIT_NODE, WRITE_BLOCK, };
|
||||
enum Node_state { DONE, INIT_BLOCK, INIT_NODE, WRITE_BLOCK };
|
||||
|
||||
struct Type_1_level
|
||||
{
|
||||
Type_1_node_block children { };
|
||||
Child_state children_state[NR_OF_T1_NODES_PER_BLK] { DONE };
|
||||
};
|
||||
State _state { COMPLETE };
|
||||
Vbd_initializer_request *_req_ptr { };
|
||||
Type_1_node_block_walk _t1_blks { };
|
||||
Node_state _node_states[TREE_MAX_NR_OF_LEVELS][NUM_NODES_PER_BLK] { DONE };
|
||||
bool _generated_req_success { false };
|
||||
Block _blk { };
|
||||
Number_of_leaves _num_remaining_leaves { };
|
||||
|
||||
struct Root_node
|
||||
{
|
||||
Type_1_node node { };
|
||||
Child_state state { DONE };
|
||||
};
|
||||
NONCOPYABLE(Vbd_initializer_channel);
|
||||
|
||||
State _state { INACTIVE };
|
||||
Vbd_initializer_request _request { };
|
||||
Root_node _root_node { };
|
||||
Type_1_level _t1_levels[TREE_MAX_LEVEL] { };
|
||||
uint64_t _level_to_write { 0 };
|
||||
uint64_t _blk_nr { 0 };
|
||||
uint64_t _child_pba { 0 };
|
||||
bool _generated_req_success { false };
|
||||
Block _encoded_blk { };
|
||||
void _generated_req_completed(State_uint) override;
|
||||
|
||||
static void reset_node(Type_1_node &node)
|
||||
{
|
||||
memset(&node, 0, sizeof(Type_1_node));
|
||||
}
|
||||
bool _request_complete() override { return _state == COMPLETE; }
|
||||
|
||||
static void reset_level(Type_1_level &level,
|
||||
Child_state state)
|
||||
{
|
||||
for (unsigned int i = 0; i < NR_OF_T1_NODES_PER_BLK; i++) {
|
||||
reset_node(level.children.nodes[i]);
|
||||
level.children_state[i] = state;
|
||||
}
|
||||
}
|
||||
void _request_submitted(Module_request &) override;
|
||||
|
||||
static void dump(Type_1_node_block const &node_block)
|
||||
{
|
||||
for (auto v : node_block.nodes) {
|
||||
log(v);
|
||||
}
|
||||
}
|
||||
void _reset_level(Tree_level_index, Node_state);
|
||||
|
||||
bool _execute_node(Tree_level_index, Tree_node_index, bool &);
|
||||
|
||||
void _mark_req_failed(bool &, char const *);
|
||||
|
||||
void _mark_req_successful(bool &);
|
||||
|
||||
public:
|
||||
|
||||
Vbd_initializer_channel(Module_channel_id id) : Module_channel { VBD_INITIALIZER, id } { }
|
||||
|
||||
void execute(bool &);
|
||||
};
|
||||
|
||||
|
||||
@ -147,76 +92,17 @@ class Tresor::Vbd_initializer : public Module
|
||||
{
|
||||
private:
|
||||
|
||||
using Request = Vbd_initializer_request;
|
||||
using Channel = Vbd_initializer_channel;
|
||||
|
||||
enum { NR_OF_CHANNELS = 1 };
|
||||
|
||||
Channel _channels[NR_OF_CHANNELS] { };
|
||||
|
||||
void _execute_leaf_child(Channel &channel,
|
||||
bool &progress,
|
||||
uint64_t &nr_of_leaves,
|
||||
Type_1_node &child,
|
||||
Vbd_initializer_channel::Child_state &child_state,
|
||||
uint64_t level_index,
|
||||
uint64_t child_index);
|
||||
|
||||
void _execute_inner_t1_child(Channel &channel,
|
||||
bool &progress,
|
||||
uint64_t nr_of_leaves,
|
||||
uint64_t &level_to_write,
|
||||
Type_1_node &child,
|
||||
Vbd_initializer_channel::Type_1_level &child_level,
|
||||
Vbd_initializer_channel::Child_state &child_state,
|
||||
uint64_t level_index,
|
||||
uint64_t child_index);
|
||||
|
||||
void _execute(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
void _execute_init(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
void _mark_req_failed(Channel &channel,
|
||||
bool &progress,
|
||||
char const *str);
|
||||
|
||||
void _mark_req_successful(Channel &channel,
|
||||
bool &progress);
|
||||
|
||||
|
||||
/************
|
||||
** Module **
|
||||
************/
|
||||
|
||||
bool _peek_completed_request(uint8_t *buf_ptr,
|
||||
size_t buf_size) override;
|
||||
|
||||
void _drop_completed_request(Module_request &req) override;
|
||||
|
||||
bool _peek_generated_request(uint8_t *buf_ptr,
|
||||
size_t buf_size) override;
|
||||
|
||||
void _drop_generated_request(Module_request &mod_req) override;
|
||||
|
||||
void generated_request_complete(Module_request &req) override;
|
||||
Constructible<Channel> _channels[1] { };
|
||||
|
||||
NONCOPYABLE(Vbd_initializer);
|
||||
|
||||
public:
|
||||
|
||||
Vbd_initializer();
|
||||
|
||||
/************
|
||||
** Module **
|
||||
************/
|
||||
|
||||
bool ready_to_submit_request() override;
|
||||
|
||||
void submit_request(Module_request &req) override;
|
||||
|
||||
void execute(bool &) override;
|
||||
|
||||
};
|
||||
|
||||
#endif /* _TRESOR__VBD_INITIALIZER_H_ */
@@ -19,6 +19,8 @@ namespace Tresor {
enum { VERBOSE_MODULE_COMMUNICATION = 0 };
enum { VERBOSE_VBD_EXTENSION = 0 };
enum { VERBOSE_FT_EXTENSION = 0 };
enum { VERBOSE_VBD_INIT = 0 };
enum { VERBOSE_FT_INIT = 0 };
enum { VERBOSE_REKEYING = 0 };
enum { VERBOSE_READ_VBA = 0 };
enum { VERBOSE_WRITE_VBA = 0 };

@@ -1,35 +0,0 @@
/*
* \brief Utilities for a more convenient use of the VFS
* \author Martin Stein
* \date 2020-10-29
*/

/*
* Copyright (C) 2023 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/

#ifndef _TRESOR__VFS_UTILITIES_H_
#define _TRESOR__VFS_UTILITIES_H_

/* base includes */
#include <vfs/vfs_handle.h>
#include <vfs/simple_env.h>


Vfs::Vfs_handle &vfs_open(Vfs::Env &vfs_env,
Genode::String<128> path,
Vfs::Directory_service::Open_mode mode);


Vfs::Vfs_handle &vfs_open_wo(Vfs::Env &vfs_env,
Genode::String<128> path);


Vfs::Vfs_handle &vfs_open_rw(Vfs::Env &vfs_env,
Genode::String<128> path);


#endif /* _TRESOR__VFS_UTILITIES_H_ */
@@ -15,9 +15,8 @@
#define _TRESOR__VIRTUAL_BLOCK_DEVICE_H_

/* tresor includes */
#include <tresor/module.h>
#include <tresor/types.h>
#include <tresor/vfs_utilities.h>
#include <tresor/free_tree.h>

namespace Tresor {

@ -28,230 +27,139 @@ namespace Tresor {
|
||||
|
||||
class Tresor::Virtual_block_device_request : public Module_request
|
||||
{
|
||||
friend class Virtual_block_device_channel;
|
||||
|
||||
public:
|
||||
|
||||
enum Type {
|
||||
INVALID = 0, REKEY_VBA = 3, READ_VBA = 1, WRITE_VBA = 2, VBD_EXTENSION_STEP = 4 };
|
||||
enum Type { REKEY_VBA, READ_VBA, WRITE_VBA, EXTENSION_STEP };
|
||||
|
||||
private:
|
||||
|
||||
friend class Virtual_block_device;
|
||||
friend class Virtual_block_device_channel;
|
||||
Type const _type;
|
||||
Virtual_block_address const _vba;
|
||||
Snapshots &_snapshots;
|
||||
Snapshot_index const _curr_snap_idx;
|
||||
Tree_degree const _snap_degr;
|
||||
Generation const _curr_gen;
|
||||
Key_id const _curr_key_id;
|
||||
Key_id const _prev_key_id;
|
||||
Tree_root &_ft;
|
||||
Tree_root &_mt;
|
||||
Tree_degree const _vbd_degree;
|
||||
Virtual_block_address const _vbd_highest_vba;
|
||||
bool const _rekeying;
|
||||
Request_offset const _client_req_offset;
|
||||
Request_tag const _client_req_tag;
|
||||
Generation const _last_secured_gen;
|
||||
Physical_block_address &_pba;
|
||||
Number_of_blocks &_num_pbas;
|
||||
Number_of_leaves &_num_leaves;
|
||||
bool &_success;
|
||||
|
||||
Type _type { INVALID };
|
||||
Virtual_block_address _vba { 0 };
|
||||
Snapshots _snapshots { };
|
||||
Snapshot_index _curr_snap_idx { 0 };
|
||||
Tree_degree _snapshots_degree { 0 };
|
||||
Generation _curr_gen { INVALID_GENERATION };
|
||||
Key_id _new_key_id { 0 };
|
||||
Key_id _old_key_id { 0 };
|
||||
addr_t _ft_root_pba_ptr { 0 };
|
||||
addr_t _ft_root_gen_ptr { 0 };
|
||||
addr_t _ft_root_hash_ptr { 0 };
|
||||
uint64_t _ft_max_level { 0 };
|
||||
uint64_t _ft_degree { 0 };
|
||||
uint64_t _ft_leaves { 0 };
|
||||
addr_t _mt_root_pba_ptr { 0 };
|
||||
addr_t _mt_root_gen_ptr { 0 };
|
||||
addr_t _mt_root_hash_ptr { 0 };
|
||||
uint64_t _mt_max_level { 0 };
|
||||
uint64_t _mt_degree { 0 };
|
||||
uint64_t _mt_leaves { 0 };
|
||||
uint64_t _vbd_degree { 0 };
|
||||
uint64_t _vbd_highest_vba { 0 };
|
||||
bool _rekeying { 0 };
|
||||
uint64_t _client_req_offset { 0 };
|
||||
uint64_t _client_req_tag { 0 };
|
||||
Generation _last_secured_generation { INVALID_GENERATION };
|
||||
Physical_block_address _pba { 0 };
|
||||
Number_of_blocks _nr_of_pbas { 0 };
|
||||
Number_of_leaves _nr_of_leaves { 0 };
|
||||
bool _success { false };
|
||||
NONCOPYABLE(Virtual_block_device_request);
|
||||
|
||||
public:
|
||||
|
||||
Virtual_block_device_request() { }
|
||||
Virtual_block_device_request(Module_id, Module_channel_id, Type, Request_offset, Request_tag, Generation,
|
||||
Tree_root &, Tree_root &, Tree_degree, Virtual_block_address, bool,
|
||||
Virtual_block_address, Snapshot_index, Snapshots &, Tree_degree, Key_id,
|
||||
Key_id, Generation, Physical_block_address &, bool &, Number_of_leaves &,
|
||||
Number_of_blocks &);
|
||||
|
||||
Virtual_block_device_request(Module_id src_module_id,
|
||||
Module_request_id src_request_id);
|
||||
|
||||
static void create(void *buf_ptr,
|
||||
size_t buf_size,
|
||||
uint64_t src_module_id,
|
||||
uint64_t src_request_id,
|
||||
size_t req_type,
|
||||
uint64_t client_req_offset,
|
||||
uint64_t client_req_tag,
|
||||
Generation last_secured_generation,
|
||||
addr_t ft_root_pba_ptr,
|
||||
addr_t ft_root_gen_ptr,
|
||||
addr_t ft_root_hash_ptr,
|
||||
uint64_t ft_max_level,
|
||||
uint64_t ft_degree,
|
||||
uint64_t ft_leaves,
|
||||
addr_t mt_root_pba_ptr,
|
||||
addr_t mt_root_gen_ptr,
|
||||
addr_t mt_root_hash_ptr,
|
||||
uint64_t mt_max_level,
|
||||
uint64_t mt_degree,
|
||||
uint64_t mt_leaves,
|
||||
uint64_t vbd_degree,
|
||||
uint64_t vbd_highest_vba,
|
||||
bool rekeying,
|
||||
Virtual_block_address vba,
|
||||
Snapshot_index curr_snap_idx,
|
||||
Snapshots const *snapshots_ptr,
|
||||
Tree_degree snapshots_degree,
|
||||
Key_id old_key_id,
|
||||
Key_id new_key_id,
|
||||
Generation current_gen,
|
||||
Key_id key_id,
|
||||
Physical_block_address first_pba,
|
||||
Number_of_blocks nr_of_pbas);
|
||||
|
||||
bool success() const { return _success; }
|
||||
|
||||
Physical_block_address pba() const { return _pba; }
|
||||
|
||||
Number_of_blocks nr_of_pbas() const { return _nr_of_pbas; }
|
||||
|
||||
Number_of_leaves nr_of_leaves() const { return _nr_of_leaves; }
|
||||
|
||||
Snapshot_index curr_snap_idx() const { return _curr_snap_idx; }
|
||||
|
||||
Snapshots *snapshots_ptr() { return &_snapshots; }
|
||||
|
||||
static char const *type_to_string(Type type);
|
||||
|
||||
char const *type_name() const { return type_to_string(_type); }
|
||||
|
||||
|
||||
/********************
|
||||
** Module_request **
|
||||
********************/
|
||||
static char const *type_to_string(Type);
|
||||
|
||||
void print(Output &out) const override { Genode::print(out, type_to_string(_type)); }
|
||||
};
|
||||
|
||||
class Tresor::Virtual_block_device_channel
|
||||
class Tresor::Virtual_block_device_channel : public Module_channel
|
||||
{
|
||||
private:
|
||||
|
||||
friend class Virtual_block_device;
|
||||
|
||||
using Request = Virtual_block_device_request;
|
||||
|
||||
enum State {
|
||||
SUBMITTED,
|
||||
READ_ROOT_NODE_PENDING,
|
||||
READ_ROOT_NODE_IN_PROGRESS,
|
||||
READ_ROOT_NODE_COMPLETED,
|
||||
READ_INNER_NODE_PENDING,
|
||||
READ_INNER_NODE_IN_PROGRESS,
|
||||
READ_INNER_NODE_COMPLETED,
|
||||
READ_LEAF_NODE_PENDING,
|
||||
READ_LEAF_NODE_IN_PROGRESS,
|
||||
READ_LEAF_NODE_COMPLETED,
|
||||
READ_CLIENT_DATA_FROM_LEAF_NODE_PENDING,
|
||||
READ_CLIENT_DATA_FROM_LEAF_NODE_IN_PROGRESS,
|
||||
READ_CLIENT_DATA_FROM_LEAF_NODE_COMPLETED,
|
||||
WRITE_CLIENT_DATA_TO_LEAF_NODE_PENDING,
|
||||
WRITE_CLIENT_DATA_TO_LEAF_NODE_IN_PROGRESS,
|
||||
WRITE_CLIENT_DATA_TO_LEAF_NODE_COMPLETED,
|
||||
DECRYPT_LEAF_NODE_PENDING,
|
||||
DECRYPT_LEAF_NODE_IN_PROGRESS,
|
||||
DECRYPT_LEAF_NODE_COMPLETED,
|
||||
ALLOC_PBAS_AT_LEAF_LVL_PENDING,
|
||||
ALLOC_PBAS_AT_LEAF_LVL_IN_PROGRESS,
|
||||
ALLOC_PBAS_AT_LEAF_LVL_COMPLETED,
|
||||
ALLOC_PBAS_AT_LOWEST_INNER_LVL_PENDING,
|
||||
ALLOC_PBAS_AT_LOWEST_INNER_LVL_IN_PROGRESS,
|
||||
ALLOC_PBAS_AT_LOWEST_INNER_LVL_COMPLETED,
|
||||
ALLOC_PBAS_AT_HIGHER_INNER_LVL_PENDING,
|
||||
ALLOC_PBAS_AT_HIGHER_INNER_LVL_IN_PROGRESS,
|
||||
ALLOC_PBAS_AT_HIGHER_INNER_LVL_COMPLETED,
|
||||
ENCRYPT_LEAF_NODE_PENDING,
|
||||
ENCRYPT_LEAF_NODE_IN_PROGRESS,
|
||||
ENCRYPT_LEAF_NODE_COMPLETED,
|
||||
WRITE_LEAF_NODE_PENDING,
|
||||
WRITE_LEAF_NODE_IN_PROGRESS,
|
||||
WRITE_LEAF_NODE_COMPLETED,
|
||||
WRITE_INNER_NODE_PENDING,
|
||||
WRITE_INNER_NODE_IN_PROGRESS,
|
||||
WRITE_INNER_NODE_COMPLETED,
|
||||
WRITE_ROOT_NODE_PENDING,
|
||||
WRITE_ROOT_NODE_IN_PROGRESS,
|
||||
WRITE_ROOT_NODE_COMPLETED,
|
||||
COMPLETED
|
||||
};
|
||||
SUBMITTED, REQ_GENERATED, REQ_COMPLETE, READ_BLK_SUCCEEDED, WRITE_BLK_SUCCEEDED,
|
||||
DECRYPT_LEAF_DATA_SUCCEEDED, ENCRYPT_LEAF_DATA_SUCCEEDED, ALLOC_PBAS_SUCCEEDED };
|
||||
|
||||
struct Type_1_node_blocks
|
||||
Request *_req_ptr { nullptr };
|
||||
State _state { REQ_COMPLETE };
|
||||
Snapshot_index _snap_idx { 0 };
|
||||
Type_1_node_block_walk _t1_blks { };
|
||||
Type_1_node_walk _t1_nodes { };
|
||||
Tree_level_index _lvl { 0 };
|
||||
Virtual_block_address _vba { 0 };
|
||||
Tree_walk_pbas _old_pbas { };
|
||||
Tree_walk_pbas _new_pbas { };
|
||||
Hash _hash { };
|
||||
Number_of_blocks _num_blks { 0 };
|
||||
Generation _free_gen { 0 };
|
||||
Block _encoded_blk { };
|
||||
Block _data_blk { };
|
||||
bool _first_snapshot { false };
|
||||
bool _gen_req_success { false };
|
||||
|
||||
NONCOPYABLE(Virtual_block_device_channel);
|
||||
|
||||
template <typename REQUEST, typename... ARGS>
|
||||
void _generate_req(State_uint complete_state, bool &progress, ARGS &&... args)
|
||||
{
|
||||
Type_1_node_block items[TREE_MAX_LEVEL] { };
|
||||
};
|
||||
|
||||
struct Type_1_node_blocks_pbas
|
||||
{
|
||||
Physical_block_address items[TREE_MAX_LEVEL] { 0 };
|
||||
};
|
||||
|
||||
enum Tag_type
|
||||
{
|
||||
TAG_INVALID,
|
||||
TAG_VBD_CACHE,
|
||||
TAG_VBD_BLK_IO_WRITE_CLIENT_DATA,
|
||||
TAG_VBD_BLK_IO_READ_CLIENT_DATA,
|
||||
TAG_VBD_BLK_IO,
|
||||
TAG_VBD_FT_ALLOC_FOR_NON_RKG,
|
||||
TAG_VBD_FT_ALLOC_FOR_RKG_CURR_GEN_BLKS,
|
||||
TAG_VBD_FT_ALLOC_FOR_RKG_OLD_GEN_BLKS,
|
||||
TAG_VBD_CRYPTO_ENCRYPT,
|
||||
TAG_VBD_CRYPTO_DECRYPT,
|
||||
};
|
||||
|
||||
struct Generated_prim
|
||||
{
|
||||
enum Type { READ, WRITE };
|
||||
|
||||
Type op { READ };
|
||||
bool succ { false };
|
||||
Tag_type tg { TAG_INVALID };
|
||||
uint64_t blk_nr { 0 };
|
||||
uint64_t idx { 0 };
|
||||
};
|
||||
|
||||
Snapshot &snapshots(Snapshot_index idx)
|
||||
{
|
||||
if (idx < MAX_NR_OF_SNAPSHOTS)
|
||||
return _request._snapshots.items[idx];
|
||||
|
||||
class Snapshot_idx_too_large { };
|
||||
throw Snapshot_idx_too_large { };
|
||||
generate_req<REQUEST>(complete_state, progress, args..., _gen_req_success);
|
||||
_state = REQ_GENERATED;
|
||||
}
|
||||
|
||||
Snapshot &snap();
|
||||
void _request_submitted(Module_request &) override;
|
||||
|
||||
void _log_rekeying_pba_alloc() const;
|
||||
bool _request_complete() override { return _state == REQ_COMPLETE; }
|
||||
|
||||
Virtual_block_device_request _request { };
|
||||
State _state { SUBMITTED };
|
||||
Generated_prim _generated_prim { };
|
||||
Snapshot_index _snapshot_idx { 0 };
|
||||
Type_1_node_blocks _t1_blks { };
|
||||
Type_1_node_blocks_pbas _t1_blks_old_pbas { };
|
||||
Tree_level_index _t1_blk_idx { 0 };
|
||||
Virtual_block_address _vba { 0 };
|
||||
Type_1_node_walk _t1_node_walk { };
|
||||
Tree_walk_pbas _new_pbas { };
|
||||
Hash _hash { };
|
||||
Number_of_blocks _nr_of_blks { 0 };
|
||||
Generation _last_secured_gen { 0 };
|
||||
Generation _free_gen { 0 };
|
||||
Block _encoded_blk { };
|
||||
Block _data_blk { };
|
||||
Physical_block_address _data_blk_old_pba { 0 };
|
||||
bool _first_snapshot { false };
|
||||
void _generated_req_completed(State_uint) override;
|
||||
|
||||
void _generate_ft_req(State, bool, Free_tree_request::Type);
|
||||
|
||||
Snapshot &snap() { return _req_ptr->_snapshots.items[_snap_idx]; }
|
||||
|
||||
void _generate_write_blk_req(bool &);
|
||||
|
||||
bool _find_next_snap_to_rekey_vba_at(Snapshot_index &) const;
|
||||
|
||||
void _read_vba(bool &);
|
||||
|
||||
bool _check_and_decode_read_blk(bool &, bool);
|
||||
|
||||
Tree_node_index _node_idx(Tree_level_index, Virtual_block_address) const;
|
||||
|
||||
Type_1_node &_node(Tree_level_index, Virtual_block_address);
|
||||
|
||||
void _mark_req_successful(bool &);
|
||||
|
||||
void _mark_req_failed(bool &, char const *);
|
||||
|
||||
void _set_new_pbas_and_num_blks_for_alloc();
|
||||
|
||||
void _generate_ft_alloc_req_for_write_vba(bool &);
|
||||
|
||||
void _write_vba(bool &);
|
||||
|
||||
void _update_nodes_of_branch_of_written_vba();
|
||||
|
||||
void _rekey_vba(bool &);
|
||||
|
||||
void _generate_ft_alloc_req_for_rekeying(Tree_level_index, bool &);
|
||||
|
||||
void _add_new_root_lvl_to_snap();
|
||||
|
||||
void _add_new_branch_to_snap(Tree_level_index, Tree_node_index);
|
||||
|
||||
void _set_new_pbas_identical_to_curr_pbas();
|
||||
|
||||
void _generate_ft_alloc_req_for_resizing(Tree_level_index, bool &);
|
||||
|
||||
void _extension_step(bool &);
|
||||
|
||||
public:
|
||||
|
||||
Virtual_block_device_channel(Module_channel_id id) : Module_channel { VIRTUAL_BLOCK_DEVICE, id } { }
|
||||
|
||||
void execute(bool &);
|
||||
};
|
||||
|
||||
class Tresor::Virtual_block_device : public Module
|
||||
@ -259,149 +167,16 @@ class Tresor::Virtual_block_device : public Module
|
||||
private:
|
||||
|
||||
using Channel = Virtual_block_device_channel;
|
||||
using Request = Virtual_block_device_request;
|
||||
using Generated_prim = Channel::Generated_prim;
|
||||
using Type_1_node_blocks = Channel::Type_1_node_blocks;
|
||||
|
||||
enum { NR_OF_CHANNELS = 1 };
|
||||
enum { FIRST_T1_NODE_BLKS_IDX = 1 };
|
||||
enum { MAX_T1_NODE_BLKS_IDX = 6 };
|
||||
Constructible<Channel> _channels[1] { };
|
||||
|
||||
Channel _channels[NR_OF_CHANNELS] { };
|
||||
|
||||
static char const *_state_to_step_label(Channel::State state);
|
||||
|
||||
bool _handle_failed_generated_req(Channel &chan,
|
||||
bool &progress);
|
||||
|
||||
bool _find_next_snap_to_rekey_vba_at(Channel const &chan,
|
||||
Snapshot_index &next_snap_idx);
|
||||
|
||||
void _execute_read_vba (Channel &, uint64_t, bool &);
|
||||
void _execute_write_vba (Channel &, uint64_t, bool &);
|
||||
void _execute_rekey_vba (Channel &, uint64_t, bool &);
|
||||
void _execute_vbd_extension_step (Channel &, uint64_t, bool &);
|
||||
|
||||
void _mark_req_failed(Channel &chan,
|
||||
bool &progress,
|
||||
char const *str);
|
||||
|
||||
void _mark_req_successful(Channel &chan,
|
||||
bool &progress);
|
||||
|
||||
void _check_that_primitive_was_successful(Channel::Generated_prim const &);
|
||||
|
||||
void _execute_read_vba_read_inner_node_completed(Channel &channel,
|
||||
uint64_t const job_idx,
|
||||
bool &progress);
|
||||
|
||||
Virtual_block_address _tree_max_max_vba(Tree_degree snap_degree,
|
||||
Snapshot const &snap);
|
||||
|
||||
void _update_nodes_of_branch_of_written_vba(Snapshot &snapshot,
|
||||
uint64_t const snapshot_degree,
|
||||
uint64_t const vba,
|
||||
Tree_walk_pbas const &new_pbas,
|
||||
Hash const & leaf_hash,
|
||||
uint64_t curr_gen,
|
||||
Channel::Type_1_node_blocks &t1_blks);
|
||||
|
||||
void
|
||||
_alloc_pba_from_resizing_contingent(Physical_block_address &first_pba,
|
||||
Number_of_blocks &nr_of_pbas,
|
||||
Physical_block_address &allocated_pba);
|
||||
|
||||
void _set_args_in_order_to_write_client_data_to_leaf_node(Tree_walk_pbas const &,
|
||||
uint64_t const job_idx,
|
||||
Channel::State &,
|
||||
Channel::Generated_prim &,
|
||||
bool &progress);
|
||||
|
||||
void _set_new_pbas_identical_to_current_pbas(Channel &chan);
|
||||
|
||||
void
|
||||
_add_new_branch_to_snap_using_pba_contingent(Channel &chan,
|
||||
Tree_level_index mount_at_lvl,
|
||||
Tree_node_index mount_at_child_idx);
|
||||
|
||||
void
|
||||
_set_args_for_alloc_of_new_pbas_for_resizing(Channel &chan,
|
||||
uint64_t chan_idx,
|
||||
Tree_level_index min_lvl,
|
||||
bool &progress);
|
||||
|
||||
void _add_new_root_lvl_to_snap_using_pba_contingent(Channel &chan);
|
||||
|
||||
void _check_hash_of_read_type_1_node(Channel &chan,
|
||||
Snapshot const &snapshot,
|
||||
uint64_t const snapshots_degree,
|
||||
uint64_t const t1_blk_idx,
|
||||
Channel::Type_1_node_blocks const &t1_blks,
|
||||
uint64_t const vba);
|
||||
|
||||
void _initialize_new_pbas_and_determine_nr_of_pbas_to_allocate(uint64_t const curr_gen,
|
||||
Snapshot const &snapshot,
|
||||
uint64_t const snapshots_degree,
|
||||
uint64_t const vba,
|
||||
Channel::Type_1_node_blocks const &t1_blks,
|
||||
Tree_walk_pbas &new_pbas,
|
||||
uint64_t &nr_of_blks);
|
||||
|
||||
void _set_args_for_alloc_of_new_pbas_for_branch_of_written_vba(uint64_t curr_gen,
|
||||
Snapshot const &snapshot,
|
||||
uint64_t const snapshots_degree,
|
||||
uint64_t const vba,
|
||||
Channel::Type_1_node_blocks const &t1_blks,
|
||||
uint64_t const prim_idx,
|
||||
uint64_t &free_gen,
|
||||
Type_1_node_walk &t1_walk,
|
||||
Channel::State &state,
|
||||
Channel::Generated_prim &prim,
|
||||
bool &progress);
|
||||
|
||||
void _set_args_for_alloc_of_new_pbas_for_rekeying(Channel &chan,
|
||||
uint64_t chan_idx,
|
||||
Tree_level_index min_lvl);
|
||||
|
||||
void _set_args_in_order_to_read_type_1_node(Snapshot const &snapshot,
|
||||
uint64_t const snapshots_degree,
|
||||
uint64_t const t1_blk_idx,
|
||||
Channel::Type_1_node_blocks const &t1_blks,
|
||||
uint64_t const vba,
|
||||
uint64_t const job_idx,
|
||||
Channel::State &state,
|
||||
Channel::Generated_prim &prim,
|
||||
bool &progress);
|
||||
|
||||
void _set_args_for_write_back_of_t1_lvl(Tree_level_index const max_lvl_idx,
|
||||
uint64_t const t1_lvl_idx,
|
||||
uint64_t const pba,
|
||||
uint64_t const prim_idx,
|
||||
Channel::State &state,
|
||||
bool &progress,
|
||||
Channel::Generated_prim &prim);
|
||||
|
||||
/************
|
||||
** Module **
|
||||
************/
|
||||
|
||||
bool ready_to_submit_request() override;
|
||||
|
||||
void submit_request(Module_request &mod_req) override;
|
||||
|
||||
bool _peek_completed_request(uint8_t *buf_ptr,
|
||||
size_t buf_size) override;
|
||||
|
||||
void _drop_completed_request(Module_request &req) override;
|
||||
NONCOPYABLE(Virtual_block_device);
|
||||
|
||||
void execute(bool &) override;
|
||||
|
||||
bool _peek_generated_request(uint8_t *buf_ptr,
|
||||
size_t buf_size) override;
|
||||
public:
|
||||
|
||||
void _drop_generated_request(Module_request &mod_req) override;
|
||||
|
||||
void generated_request_complete(Module_request &req) override;
|
||||
Virtual_block_device();
|
||||
};
|
||||
|
||||
#endif /* _TRESOR__VIRTUAL_BLOCK_DEVICE_H_ */
@@ -1,5 +1,5 @@
/*
* \brief Module for doing free tree COW allocations on the meta tree
* \brief Module for doing VBD COW allocations on the meta tree
* \author Martin Stein
* \date 2023-02-13
*/
@@ -14,670 +14,209 @@
/* tresor includes */
#include <tresor/meta_tree.h>
#include <tresor/block_io.h>
#include <tresor/sha256_4k_hash.h>
#include <tresor/hash.h>

using namespace Tresor;


/***************
|
||||
** Utilities **
|
||||
***************/
|
||||
|
||||
static bool check_level_0_usable(Generation gen,
|
||||
Type_2_node &node)
|
||||
{
|
||||
return node.alloc_gen != gen;
|
||||
}
|
||||
|
||||
|
||||
/***********************
|
||||
** Meta_tree_request **
|
||||
***********************/
|
||||
|
||||
void Meta_tree_request::create(void *buf_ptr,
|
||||
size_t buf_size,
|
||||
uint64_t src_module_id,
|
||||
uint64_t src_request_id,
|
||||
size_t req_type,
|
||||
void *mt_root_pba_ptr,
|
||||
void *mt_root_gen_ptr,
|
||||
void *mt_root_hash_ptr,
|
||||
uint64_t mt_max_lvl,
|
||||
uint64_t mt_edges,
|
||||
uint64_t mt_leaves,
|
||||
uint64_t curr_gen,
|
||||
uint64_t old_pba)
|
||||
{
|
||||
Meta_tree_request req { src_module_id, src_request_id };
|
||||
req._type = (Type)req_type;
|
||||
req._mt_root_pba_ptr = (addr_t)mt_root_pba_ptr;
|
||||
req._mt_root_gen_ptr = (addr_t)mt_root_gen_ptr;
|
||||
req._mt_root_hash_ptr = (addr_t)mt_root_hash_ptr;
|
||||
req._mt_max_lvl = mt_max_lvl;
|
||||
req._mt_edges = mt_edges;
|
||||
req._mt_leaves = mt_leaves;
|
||||
req._current_gen = curr_gen;
|
||||
req._old_pba = old_pba;
|
||||
if (sizeof(req) > buf_size) {
|
||||
class Exception_2 { };
|
||||
throw Exception_2 { };
|
||||
}
|
||||
memcpy(buf_ptr, &req, sizeof(req));
|
||||
}
|
||||
|
||||
|
||||
Meta_tree_request::Meta_tree_request(Module_id src_module_id,
|
||||
Module_request_id src_request_id)
|
||||
:
|
||||
Module_request { src_module_id, src_request_id, META_TREE }
|
||||
{ }
|
||||
|
||||
|
||||
char const *Meta_tree_request::type_to_string(Type type)
|
||||
{
|
||||
switch (type) {
|
||||
case INVALID: return "invalid";
|
||||
case UPDATE: return "update";
|
||||
case ALLOC_PBA: return "alloc pba";
|
||||
}
|
||||
return "?";
|
||||
ASSERT_NEVER_REACHED;
|
||||
}
|
||||
|
||||
|
||||
/***************
|
||||
** Meta_tree **
|
||||
***************/
|
||||
|
||||
bool Meta_tree::_peek_generated_request(uint8_t *buf_ptr,
|
||||
size_t buf_size)
|
||||
{
|
||||
for (uint32_t id { 0 }; id < NR_OF_CHANNELS; id++) {
|
||||
|
||||
Channel &channel { _channels[id] };
|
||||
Local_cache_request const &local_req { channel._cache_request };
|
||||
if (local_req.state == Local_cache_request::PENDING) {
|
||||
|
||||
Block_io_request::Type blk_io_req_type {
|
||||
local_req.op == Local_cache_request::READ ?
|
||||
Block_io_request::READ :
|
||||
Local_cache_request::WRITE ?
|
||||
Block_io_request::WRITE :
|
||||
Block_io_request::INVALID };
|
||||
|
||||
if (blk_io_req_type == Block_io_request::INVALID) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
construct_in_buf<Block_io_request>(
|
||||
buf_ptr, buf_size, META_TREE, id, blk_io_req_type,
|
||||
0, 0, 0, local_req.pba, 0, 1,
|
||||
(void *)&channel._cache_request.block_data, nullptr);
|
||||
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
void Meta_tree::_drop_generated_request(Module_request &mod_req)
|
||||
{
|
||||
Module_request_id const id { mod_req.src_request_id() };
|
||||
if (id >= NR_OF_CHANNELS) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
Local_cache_request &local_req { _channels[id]._cache_request };
|
||||
if (local_req.state != Local_cache_request::PENDING) {
|
||||
class Exception_2 { };
|
||||
throw Exception_2 { };
|
||||
}
|
||||
local_req.state = Local_cache_request::IN_PROGRESS;
|
||||
}
|
||||
|
||||
|
||||
void Meta_tree::generated_request_complete(Module_request &mod_req)
|
||||
{
|
||||
Module_request_id const id { mod_req.src_request_id() };
|
||||
if (id >= NR_OF_CHANNELS) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
Local_cache_request &local_req { _channels[id]._cache_request };
|
||||
if (local_req.state != Local_cache_request::IN_PROGRESS) {
|
||||
class Exception_2 { };
|
||||
throw Exception_2 { };
|
||||
}
|
||||
if (mod_req.dst_module_id() != BLOCK_IO) {
|
||||
class Exception_3 { };
|
||||
throw Exception_3 { };
|
||||
}
|
||||
Block_io_request &blk_io_req { *static_cast<Block_io_request *>(&mod_req) };
|
||||
Channel &channel { _channels[id] };
|
||||
if (!blk_io_req.success()) {
|
||||
|
||||
channel._request._success = false;
|
||||
channel._request._new_pba = INVALID_PBA;
|
||||
channel._state = Channel::COMPLETE;
|
||||
return;
|
||||
|
||||
}
|
||||
Type_1_info &t1_info { channel._level_n_nodes[local_req.level] };
|
||||
Type_2_info &t2_info { channel._level_1_node };
|
||||
|
||||
switch (local_req.op) {
|
||||
case Local_cache_request::SYNC:
|
||||
|
||||
class Exception_3 { };
|
||||
throw Exception_3 { };
|
||||
|
||||
case Local_cache_request::READ:
|
||||
|
||||
if (local_req.level > T2_NODE_LVL) {
|
||||
|
||||
if (!check_sha256_4k_hash(channel._cache_request.block_data, t1_info.node.hash)) {
|
||||
|
||||
channel._state = Channel::TREE_HASH_MISMATCH;
|
||||
|
||||
} else {
|
||||
|
||||
t1_info.entries.decode_from_blk(channel._cache_request.block_data);
|
||||
t1_info.index = 0;
|
||||
t1_info.state = Type_1_info::READ_COMPLETE;
|
||||
}
|
||||
} else if (local_req.level == T2_NODE_LVL) {
|
||||
|
||||
if (!check_sha256_4k_hash(channel._cache_request.block_data, t2_info.node.hash)) {
|
||||
|
||||
channel._state = Channel::TREE_HASH_MISMATCH;
|
||||
|
||||
} else {
|
||||
|
||||
t2_info.entries.decode_from_blk(channel._cache_request.block_data);
|
||||
t2_info.index = 0;
|
||||
t2_info.state = Type_2_info::READ_COMPLETE;
|
||||
}
|
||||
} else {
|
||||
class Exception_4 { };
|
||||
throw Exception_4 { };
|
||||
}
|
||||
break;
|
||||
|
||||
case Local_cache_request::WRITE:
|
||||
|
||||
if (local_req.level > T2_NODE_LVL) {
|
||||
|
||||
t1_info.state = Type_1_info::WRITE_COMPLETE;
|
||||
|
||||
} else if (local_req.level == T2_NODE_LVL) {
|
||||
|
||||
t2_info.state = Type_2_info::WRITE_COMPLETE;
|
||||
|
||||
} else {
|
||||
|
||||
class Exception_5 { };
|
||||
throw Exception_5 { };
|
||||
}
|
||||
break;
|
||||
}
|
||||
local_req = Local_cache_request {
|
||||
Local_cache_request::INVALID, Local_cache_request::READ,
|
||||
false, 0, 0, nullptr };
|
||||
}
|
||||
|
||||
|
||||
void Meta_tree::_mark_req_failed(Channel &chan,
|
||||
bool &progress,
|
||||
char const *str)
|
||||
{
|
||||
error(chan._request.type_name(), " request failed, reason: \"", str, "\"");
|
||||
chan._request._success = false;
|
||||
chan._state = Channel::COMPLETE;
|
||||
progress = true;
|
||||
}
|
||||
|
||||
|
||||
void Meta_tree::_mark_req_successful(Channel &channel,
|
||||
bool &progress)
|
||||
{
|
||||
channel._request._success = true;
|
||||
channel._state = Channel::COMPLETE;
|
||||
progress = true;
|
||||
}
|
||||
|
||||
|
||||
void Meta_tree::_update_parent(Type_1_node &node,
|
||||
Block const &blk,
|
||||
uint64_t gen,
|
||||
uint64_t pba)
|
||||
{
|
||||
calc_sha256_4k_hash(blk, node.hash);
|
||||
node.gen = gen;
|
||||
node.pba = pba;
|
||||
}
|
||||
|
||||
|
||||
void Meta_tree::_exchange_nv_inner_nodes(Channel &channel,
|
||||
Type_2_node &t2_entry,
|
||||
bool &exchanged)
|
||||
{
|
||||
Request &req { channel._request };
|
||||
uint64_t pba;
|
||||
exchanged = false;
|
||||
|
||||
// loop non-volatile inner nodes
|
||||
for (uint64_t lvl { MT_LOWEST_T1_LVL }; lvl <= TREE_MAX_LEVEL; lvl++) {
|
||||
|
||||
Type_1_info &t1_info { channel._level_n_nodes[lvl] };
|
||||
if (t1_info.node.valid() && !t1_info.volatil) {
|
||||
|
||||
pba = t1_info.node.pba;
|
||||
t1_info.node.pba = t2_entry.pba;
|
||||
t1_info.node.gen = req._current_gen;
|
||||
t1_info.volatil = true;
|
||||
t2_entry.pba = pba;
|
||||
t2_entry.alloc_gen = req._current_gen;
|
||||
t2_entry.free_gen = req._current_gen;
|
||||
t2_entry.reserved = false;
|
||||
|
||||
exchanged = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Meta_tree::_exchange_nv_level_1_node(Channel &channel,
|
||||
Type_2_node &t2_entry,
|
||||
bool &exchanged)
|
||||
{
|
||||
Request &req { channel._request };
|
||||
uint64_t pba { channel._level_1_node.node.pba };
|
||||
exchanged = false;
|
||||
|
||||
if (!channel._level_1_node.volatil) {
|
||||
|
||||
channel._level_1_node.node.pba = t2_entry.pba;
|
||||
channel._level_1_node.volatil = true;
|
||||
|
||||
t2_entry.pba = pba;
|
||||
t2_entry.alloc_gen = req._current_gen;
|
||||
t2_entry.free_gen = req._current_gen;
|
||||
t2_entry.reserved = false;
|
||||
|
||||
exchanged = true;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Meta_tree::_exchange_request_pba(Channel &channel,
|
||||
Type_2_node &t2_entry)
|
||||
{
|
||||
Request &req { channel._request };
|
||||
req._success = true;
|
||||
req._new_pba = t2_entry.pba;
|
||||
channel._finished = true;
|
||||
|
||||
t2_entry.pba = req._old_pba;
|
||||
t2_entry.alloc_gen = req._current_gen;
|
||||
t2_entry.free_gen = req._current_gen;
|
||||
t2_entry.reserved = false;
|
||||
}
|
||||
|
||||
|
||||
void Meta_tree::_handle_level_0_nodes(Channel &channel,
|
||||
bool &handled)
|
||||
{
|
||||
Request &req { channel._request };
|
||||
Type_2_node tmp_t2_entry;
|
||||
handled = false;
|
||||
|
||||
for(unsigned i = 0; i <= req._mt_edges - 1; i++) {
|
||||
|
||||
tmp_t2_entry = channel._level_1_node.entries.nodes[i];
|
||||
|
||||
if (tmp_t2_entry.valid() &&
|
||||
check_level_0_usable(req._current_gen, tmp_t2_entry))
|
||||
{
|
||||
bool exchanged_level_1;
|
||||
bool exchanged_level_n { false };
|
||||
bool exchanged_request_pba { false };
|
||||
|
||||
// first try to exchange the level 1 node ...
|
||||
_exchange_nv_level_1_node(
|
||||
channel, tmp_t2_entry, exchanged_level_1);
|
||||
|
||||
// ... next the inner level n nodes ...
|
||||
if (!exchanged_level_1)
|
||||
_exchange_nv_inner_nodes(
|
||||
channel, tmp_t2_entry, exchanged_level_n);
|
||||
|
||||
// ... and than satisfy the original mt request
|
||||
if (!exchanged_level_1 && !exchanged_level_n) {
|
||||
_exchange_request_pba(channel, tmp_t2_entry);
|
||||
exchanged_request_pba = true;
|
||||
}
|
||||
channel._level_1_node.entries.nodes[i] = tmp_t2_entry;
|
||||
handled = true;
|
||||
|
||||
if (exchanged_request_pba)
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Meta_tree::_handle_level_1_node(Channel &channel,
|
||||
bool &handled)
|
||||
{
|
||||
Type_1_info &t1_info { channel._level_n_nodes[MT_LOWEST_T1_LVL] };
|
||||
Type_2_info &t2_info { channel._level_1_node };
|
||||
Request &req { channel._request };
|
||||
|
||||
switch (t2_info.state) {
|
||||
case Type_2_info::INVALID:
|
||||
|
||||
handled = false;
|
||||
break;
|
||||
|
||||
case Type_2_info::READ:
|
||||
|
||||
channel._cache_request = Local_cache_request {
|
||||
Local_cache_request::PENDING, Local_cache_request::READ, false,
|
||||
t2_info.node.pba, 1, nullptr };
|
||||
|
||||
handled = true;
|
||||
break;
|
||||
|
||||
case Type_2_info::READ_COMPLETE:
|
||||
|
||||
_handle_level_0_nodes(channel, handled);
|
||||
if (handled) {
|
||||
t2_info.state = Type_2_info::WRITE;
|
||||
} else {
|
||||
t2_info.state = Type_2_info::COMPLETE;
|
||||
handled = true;
|
||||
}
|
||||
break;
|
||||
|
||||
case Type_2_info::WRITE:
|
||||
{
|
||||
Block block_data { };
|
||||
t2_info.entries.encode_to_blk(block_data);
|
||||
|
||||
_update_parent(
|
||||
t1_info.entries.nodes[t1_info.index], block_data,
|
||||
req._current_gen, t2_info.node.pba);
|
||||
|
||||
channel._cache_request = Local_cache_request {
|
||||
Local_cache_request::PENDING, Local_cache_request::WRITE, false,
|
||||
t2_info.node.pba, 1, &block_data };
|
||||
|
||||
t1_info.dirty = true;
|
||||
handled = true;
|
||||
break;
|
||||
}
|
||||
case Type_2_info::WRITE_COMPLETE:
|
||||
|
||||
t1_info.index++;
|
||||
t2_info.state = Type_2_info::INVALID;
|
||||
handled = true;
|
||||
break;
|
||||
|
||||
case Type_2_info::COMPLETE:
|
||||
|
||||
t1_info.index++;
|
||||
t2_info.state = Type_2_info::INVALID;
|
||||
handled = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Meta_tree::_execute_update(Channel &channel,
|
||||
bool &progress)
|
||||
{
|
||||
bool handled_level_1_node;
|
||||
bool handled_level_n_nodes;
|
||||
_handle_level_1_node(channel, handled_level_1_node);
|
||||
if (handled_level_1_node) {
|
||||
progress = true;
|
||||
return;
|
||||
}
|
||||
_handle_level_n_nodes(channel, handled_level_n_nodes);
|
||||
progress = progress || handled_level_n_nodes;
|
||||
}
|
||||
Meta_tree_request::Meta_tree_request(Module_id src_module_id, Module_channel_id src_channel_id,
|
||||
Type type, Tree_root &mt, Generation curr_gen,
|
||||
Physical_block_address &pba, bool &success)
|
||||
:
|
||||
Module_request { src_module_id, src_channel_id, META_TREE }, _type { type }, _mt { mt },
|
||||
_curr_gen { curr_gen }, _pba { pba }, _success { success }
|
||||
{ }
|
||||
|
||||
|
||||
void Meta_tree::execute(bool &progress)
|
||||
{
|
||||
for (Channel &channel : _channels) {
|
||||
for_each_channel<Channel>([&] (Channel &chan) {
|
||||
chan.execute(progress); });
|
||||
}
|
||||
|
||||
if (channel._cache_request.state != Local_cache_request::INVALID)
|
||||
continue;
|
||||
|
||||
switch(channel._state) {
|
||||
case Channel::INVALID:
|
||||
break;
|
||||
case Channel::UPDATE:
|
||||
_execute_update(channel, progress);
|
||||
break;
|
||||
case Channel::COMPLETE:
|
||||
break;
|
||||
case Channel::TREE_HASH_MISMATCH:
|
||||
_mark_req_failed(channel, progress, "node hash mismatch");
|
||||
break;
|
||||
bool Meta_tree_channel::_can_alloc_pba_of(Type_2_node &node)
|
||||
{
|
||||
return node.valid() && node.alloc_gen != _req_ptr->_curr_gen;
|
||||
}
|
||||
|
||||
|
||||
void Meta_tree_channel::_generated_req_completed(State_uint state_uint)
|
||||
{
|
||||
if (!_generated_req_success) {
|
||||
error("meta tree: request (", *_req_ptr, ") failed because generated request failed)");
|
||||
_req_ptr->_success = false;
|
||||
_state = COMPLETE;
|
||||
_req_ptr = nullptr;
|
||||
return;
|
||||
}
|
||||
_state = (State)state_uint;
|
||||
}
|
||||
|
||||
|
||||
void Meta_tree_channel::_alloc_pba_of(Type_2_node &t2_node, Physical_block_address &pba)
|
||||
{
|
||||
Request &req { *_req_ptr };
|
||||
Physical_block_address old_pba { pba };
|
||||
pba = t2_node.pba;
|
||||
t2_node.pba = old_pba;
|
||||
t2_node.alloc_gen = req._curr_gen;
|
||||
t2_node.free_gen = req._curr_gen;
|
||||
t2_node.reserved = false;
|
||||
}
|
||||
|
||||
|
||||
void Meta_tree_channel::_mark_req_failed(bool &progress, char const *str)
|
||||
{
|
||||
error(Request::type_to_string(_req_ptr->_type), " request failed, reason: \"", str, "\"");
|
||||
_req_ptr->_success = false;
|
||||
_state = COMPLETE;
|
||||
_req_ptr = nullptr;
|
||||
progress = true;
|
||||
}
|
||||
|
||||
|
||||
void Meta_tree_channel::_mark_req_successful(bool &progress)
|
||||
{
|
||||
_req_ptr->_success = true;
|
||||
_state = COMPLETE;
|
||||
_req_ptr = nullptr;
|
||||
progress = true;
|
||||
}
|
||||
|
||||
|
||||
void Meta_tree_channel::_start_tree_traversal(bool &progress)
|
||||
{
|
||||
Request &req { *_req_ptr };
|
||||
_lvl = req._mt.max_lvl;
|
||||
_node_idx[_lvl] = 0;
|
||||
_t1_blks[_lvl].nodes[_node_idx[_lvl]] = req._mt.t1_node();
|
||||
_generate_req<Block_io::Read>(SEEK_DOWN, progress, req._mt.pba, _blk);
|
||||
}
|
||||
|
||||
|
||||
void Meta_tree_channel::_traverse_curr_node(bool &progress)
|
||||
{
|
||||
Request &req { *_req_ptr };
|
||||
if (_lvl) {
|
||||
Type_1_node &t1_node { _t1_blks[_lvl].nodes[_node_idx[_lvl]] };
|
||||
if (t1_node.pba)
|
||||
_generate_req<Block_io::Read>(SEEK_DOWN, progress, t1_node.pba, _blk);
|
||||
else {
|
||||
_state = SEEK_LEFT_OR_UP;
|
||||
progress = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Meta_tree::Meta_tree() { }
|
||||
|
||||
|
||||
bool Meta_tree::_peek_completed_request(uint8_t *buf_ptr,
|
||||
size_t buf_size)
|
||||
{
|
||||
for (Channel &channel : _channels) {
|
||||
if (channel._state == Channel::COMPLETE) {
|
||||
if (sizeof(channel._request) > buf_size) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
memcpy(buf_ptr, &channel._request, sizeof(channel._request));
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
void Meta_tree::_drop_completed_request(Module_request &req)
|
||||
{
|
||||
Module_request_id id { 0 };
|
||||
id = req.dst_request_id();
|
||||
if (id >= NR_OF_CHANNELS) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
if (_channels[id]._state != Channel::COMPLETE) {
|
||||
class Exception_2 { };
|
||||
throw Exception_2 { };
|
||||
}
|
||||
_channels[id]._state = Channel::INVALID;
|
||||
}
|
||||
|
||||
|
||||
bool Meta_tree::ready_to_submit_request()
|
||||
{
|
||||
for (Channel &channel : _channels) {
|
||||
if (channel._state == Channel::INVALID)
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
bool Meta_tree::_node_volatile(Type_1_node const &node,
|
||||
uint64_t gen)
|
||||
{
|
||||
return node.gen == 0 || node.gen != gen;
|
||||
}
|
||||
|
||||
|
||||
void Meta_tree::submit_request(Module_request &mod_req)
|
||||
{
|
||||
for (Module_request_id id { 0 }; id < NR_OF_CHANNELS; id++) {
|
||||
Channel &chan { _channels[id] };
|
||||
if (chan._state == Channel::INVALID) {
|
||||
|
||||
mod_req.dst_request_id(id);
|
||||
|
||||
chan._request = *static_cast<Request *>(&mod_req);
|
||||
chan._finished = false;
|
||||
chan._state = Channel::UPDATE;
|
||||
for (Type_1_info &t1_info : chan._level_n_nodes) {
|
||||
t1_info = Type_1_info { };
|
||||
}
|
||||
chan._level_1_node = Type_2_info { };
|
||||
|
||||
Request &req { chan._request };
|
||||
Type_1_node root_node { };
|
||||
root_node.pba = *(uint64_t *)req._mt_root_pba_ptr;
|
||||
root_node.gen = *(uint64_t *)req._mt_root_gen_ptr;
|
||||
memcpy(&root_node.hash, (uint8_t *)req._mt_root_hash_ptr,
|
||||
HASH_SIZE);
|
||||
|
||||
chan._level_n_nodes[req._mt_max_lvl].node = root_node;
|
||||
chan._level_n_nodes[req._mt_max_lvl].state = Type_1_info::READ;
|
||||
chan._level_n_nodes[req._mt_max_lvl].volatil =
|
||||
_node_volatile(root_node, req._current_gen);
|
||||
|
||||
return;
|
||||
}
|
||||
}
|
||||
class Invalid_call { };
|
||||
throw Invalid_call { };
|
||||
}
|
||||
|
||||
|
||||
void Meta_tree::_handle_level_n_nodes(Channel &channel,
|
||||
bool &handled)
|
||||
{
|
||||
Request &req { channel._request };
|
||||
handled = false;
|
||||
|
||||
for (uint64_t lvl { MT_LOWEST_T1_LVL }; lvl <= TREE_MAX_LEVEL; lvl++) {
|
||||
|
||||
Type_1_info &t1_info { channel._level_n_nodes[lvl] };
|
||||
|
||||
switch (t1_info.state) {
|
||||
case Type_1_info::INVALID:
|
||||
|
||||
break;
|
||||
|
||||
case Type_1_info::READ:
|
||||
|
||||
channel._cache_request = Local_cache_request {
|
||||
Local_cache_request::PENDING, Local_cache_request::READ, false,
|
||||
t1_info.node.pba, lvl, nullptr };
|
||||
|
||||
handled = true;
|
||||
return;
|
||||
|
||||
case Type_1_info::READ_COMPLETE:
|
||||
|
||||
if (t1_info.index < req._mt_edges &&
|
||||
t1_info.entries.nodes[t1_info.index].valid() &&
|
||||
!channel._finished) {
|
||||
|
||||
if (lvl != MT_LOWEST_T1_LVL) {
|
||||
channel._level_n_nodes[lvl - 1] = {
|
||||
Type_1_info::READ, t1_info.entries.nodes[t1_info.index],
|
||||
{ }, 0, false,
|
||||
_node_volatile(t1_info.node, req._current_gen) };
|
||||
|
||||
} else {
|
||||
channel._level_1_node = {
|
||||
Type_2_info::READ, t1_info.entries.nodes[t1_info.index],
|
||||
{ }, 0,
|
||||
_node_volatile(t1_info.node, req._current_gen) };
|
||||
} else {
|
||||
Type_2_node &t2_node { _t2_blk.nodes[_node_idx[_lvl]] };
|
||||
if (_can_alloc_pba_of(t2_node)) {
|
||||
_alloc_pba_of(t2_node, _req_ptr->_pba);
|
||||
for (Tree_level_index lvl { 1 }; lvl <= req._mt.max_lvl; lvl++) {
|
||||
Type_1_node &t1_node { _t1_blks[lvl].nodes[_node_idx[lvl]] };
|
||||
if (!t1_node.is_volatile(req._curr_gen)) {
|
||||
bool pba_allocated { false };
|
||||
for (Type_2_node &t2_node : _t2_blk.nodes) {
|
||||
if (_can_alloc_pba_of(t2_node)) {
|
||||
_alloc_pba_of(t2_node, t1_node.pba);
|
||||
pba_allocated = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
ASSERT(pba_allocated);
|
||||
}
|
||||
|
||||
} else {
|
||||
|
||||
if (t1_info.dirty)
|
||||
t1_info.state = Type_1_info::WRITE;
|
||||
else
|
||||
t1_info.state = Type_1_info::COMPLETE;
|
||||
}
|
||||
handled = true;
|
||||
return;
|
||||
|
||||
case Type_1_info::WRITE:
|
||||
{
|
||||
Block block_data;
|
||||
t1_info.entries.encode_to_blk(block_data);
|
||||
|
||||
if (lvl == req._mt_max_lvl) {
|
||||
|
||||
Type_1_node root_node { };
|
||||
root_node.pba = *(uint64_t *)req._mt_root_pba_ptr;
|
||||
root_node.gen = *(uint64_t *)req._mt_root_gen_ptr;
|
||||
memcpy(&root_node.hash, (uint8_t *)req._mt_root_hash_ptr,
|
||||
HASH_SIZE);
|
||||
|
||||
_update_parent(
|
||||
root_node, block_data, req._current_gen,
|
||||
t1_info.node.pba);
|
||||
|
||||
*(uint64_t *)req._mt_root_pba_ptr = root_node.pba;
|
||||
*(uint64_t *)req._mt_root_gen_ptr = root_node.gen;
|
||||
memcpy((uint8_t *)req._mt_root_hash_ptr, &root_node.hash,
|
||||
HASH_SIZE);
|
||||
|
||||
channel._root_dirty = true;
|
||||
|
||||
} else {
|
||||
|
||||
Type_1_info &parent { channel._level_n_nodes[lvl + 1] };
|
||||
_update_parent(
|
||||
parent.entries.nodes[parent.index], block_data,
|
||||
req._current_gen, t1_info.node.pba);
|
||||
|
||||
parent.dirty = true;
|
||||
}
|
||||
channel._cache_request = Local_cache_request {
|
||||
Local_cache_request::PENDING, Local_cache_request::WRITE,
|
||||
false, t1_info.node.pba, lvl, &block_data };
|
||||
|
||||
handled = true;
|
||||
return;
|
||||
}
|
||||
case Type_1_info::WRITE_COMPLETE:
|
||||
|
||||
if (lvl == req._mt_max_lvl)
|
||||
channel._state = Channel::COMPLETE;
|
||||
else
|
||||
channel._level_n_nodes[lvl + 1].index++;
|
||||
|
||||
channel._cache_request = Local_cache_request {
|
||||
Local_cache_request::INVALID, Local_cache_request::READ,
|
||||
false, 0, 0, nullptr };
|
||||
|
||||
t1_info.state = Type_1_info::INVALID;
|
||||
handled = true;
|
||||
return;
|
||||
|
||||
case Type_1_info::COMPLETE:
|
||||
|
||||
if (lvl == req._mt_max_lvl)
|
||||
channel._state = Channel::COMPLETE;
|
||||
else
|
||||
channel._level_n_nodes[lvl + 1].index++;
|
||||
|
||||
t1_info.state = Type_1_info::INVALID;
|
||||
handled = true;
|
||||
return;
|
||||
}
|
||||
_state = WRITE_BLK;
|
||||
} else
|
||||
_state = SEEK_LEFT_OR_UP;
|
||||
progress = true;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Meta_tree_channel::execute(bool &progress)
{
	if (!_req_ptr)
		return;

	Request &req { *_req_ptr };
	switch (_state) {
	case REQ_SUBMITTED:

		_start_tree_traversal(progress);
		break;

	case SEEK_DOWN:

		if (!check_hash(_blk, _t1_blks[_lvl].nodes[_node_idx[_lvl]].hash)) {
			_mark_req_failed(progress, "hash mismatch");
			break;
		}
		_lvl--;
		_node_idx[_lvl] = 0;
		if (_lvl)
			_t1_blks[_lvl].decode_from_blk(_blk);
		else
			_t2_blk.decode_from_blk(_blk);
		_traverse_curr_node(progress);
		break;

	case SEEK_LEFT_OR_UP:

		if (_lvl < req._mt.max_lvl) {
			if (_node_idx[_lvl] < req._mt.degree - 1) {
				_node_idx[_lvl]++;
				_traverse_curr_node(progress);
			} else {
				_lvl++;
				_state = SEEK_LEFT_OR_UP;
				progress = true;
			}
		} else
			_mark_req_failed(progress, "not enough free pbas");
		break;

	case WRITE_BLK:

		if (_lvl < req._mt.max_lvl) {
			if (_lvl)
				_t1_blks[_lvl].encode_to_blk(_blk);
			else
				_t2_blk.encode_to_blk(_blk);
			_lvl++;
			Type_1_node &t1_node { _t1_blks[_lvl].nodes[_node_idx[_lvl]] };
			t1_node.gen = req._curr_gen;
			calc_hash(_blk, t1_node.hash);
			_generate_req<Block_io::Write>(WRITE_BLK, progress, t1_node.pba, _blk);
		} else {
			req._mt.t1_node(_t1_blks[_lvl].nodes[_node_idx[_lvl]]);
			_mark_req_successful(progress);
		}
		break;

	default: break;
	}
}


void Meta_tree_channel::_request_submitted(Module_request &mod_req)
{
	_req_ptr = static_cast<Request *>(&mod_req);
	_state = REQ_SUBMITTED;
}


Meta_tree::Meta_tree()
{
	Module_channel_id id { 0 };
	for (Constructible<Channel> &chan : _channels) {
		chan.construct(id++);
		add_channel(*chan);
	}
}
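The channel code above follows the pattern shared by the reworked modules: a channel keeps a pointer to the single request it currently serves, advances an enum state whenever execute() can make progress, and generates requests to other modules inline. The following stand-alone sketch illustrates only that driving pattern; the names Toy_request, Toy_channel, and the states are invented for illustration and are not part of the Tresor sources.

#include <cassert>
#include <cstdio>

/* hypothetical stand-in for a module request handed to a channel */
struct Toy_request { bool success { false }; };

/* minimal channel: one request at a time, driven by repeated execute() calls */
class Toy_channel
{
	private:

		enum State { IDLE, SUBMITTED, STEP_ONE, COMPLETE } _state { IDLE };

		Toy_request *_req_ptr { nullptr };

	public:

		bool try_submit(Toy_request &req)
		{
			if (_req_ptr)
				return false; /* channel busy, caller must retry later */

			_req_ptr = &req;
			_state = SUBMITTED;
			return true;
		}

		/* advance the state machine; raise progress whenever the state changed */
		void execute(bool &progress)
		{
			if (!_req_ptr)
				return;

			switch (_state) {
			case SUBMITTED:
				_state = STEP_ONE;
				progress = true;
				break;
			case STEP_ONE:
				_req_ptr->success = true;
				_state = COMPLETE;
				_req_ptr = nullptr;
				progress = true;
				break;
			default: break;
			}
		}
};

int main()
{
	Toy_channel chan { };
	Toy_request req { };
	assert(chan.try_submit(req));

	/* drive the channel until it stops making progress */
	for (bool progress = true; progress; ) {
		progress = false;
		chan.execute(progress);
	}
	printf("request %s\n", req.success ? "succeeded" : "failed");
	return 0;
}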
@@ -18,40 +18,12 @@
|
||||
using namespace Tresor;
|
||||
|
||||
|
||||
/********************
|
||||
** Module_request **
|
||||
********************/
|
||||
|
||||
Module_request::Module_request(Module_id src_module_id,
|
||||
Module_request_id src_request_id,
|
||||
Module_id dst_module_id)
|
||||
Module_request::Module_request(Module_id src_module_id, Module_channel_id src_chan_id, Module_id dst_module_id)
|
||||
:
|
||||
_src_module_id { src_module_id },
|
||||
_src_request_id { src_request_id },
|
||||
_dst_module_id { dst_module_id }
|
||||
_src_module_id { src_module_id }, _src_chan_id { src_chan_id }, _dst_module_id { dst_module_id }
|
||||
{ }
|
||||
|
||||
|
||||
String<32> Module_request::src_request_id_str() const
|
||||
{
|
||||
return
|
||||
_src_request_id == INVALID_MODULE_REQUEST_ID ?
|
||||
String<32> { "?" } : String<32> { _src_request_id };
|
||||
}
|
||||
|
||||
|
||||
String<32> Module_request::dst_request_id_str() const
|
||||
{
|
||||
return
|
||||
_dst_request_id == INVALID_MODULE_REQUEST_ID ?
|
||||
String<32> { "?" } : String<32> { _dst_request_id };
|
||||
}
|
||||
|
||||
|
||||
/**********************
|
||||
** Global functions **
|
||||
**********************/
|
||||
|
||||
char const *Tresor::module_name(Module_id id)
|
||||
{
|
||||
switch (id) {
|
||||
@@ -65,16 +37,109 @@ char const *Tresor::module_name(Module_id id)
|
||||
case CLIENT_DATA: return "client_data";
|
||||
case TRUST_ANCHOR: return "trust_anchor";
|
||||
case COMMAND_POOL: return "command_pool";
|
||||
case BLOCK_ALLOCATOR: return "block_allocator";
|
||||
case VBD_INITIALIZER: return "vbd_initializer";
|
||||
case FT_INITIALIZER: return "ft_initializer";
|
||||
case SB_INITIALIZER: return "sb_initializer";
|
||||
case SB_CHECK: return "sb_check";
|
||||
case VBD_CHECK: return "vbd_check";
|
||||
case FT_CHECK: return "ft_check";
|
||||
case FT_RESIZING: return "ft_resizing";
|
||||
case SPLITTER: return "splitter";
|
||||
case REQUEST_POOL: return "request_pool";
|
||||
default: break;
|
||||
}
|
||||
return "?";
|
||||
ASSERT_NEVER_REACHED;
|
||||
}
|
||||
|
||||
|
||||
void Module_channel::generated_req_completed()
{
	ASSERT(_gen_req_state == IN_PROGRESS);
	_gen_req_state = NONE;
	_generated_req_completed(_gen_req_complete_state);
}


bool Module_channel::try_submit_request(Module_request &req)
{
	if (_req_ptr)
		return false;

	req.dst_chan_id(_id);
	_req_ptr = &req;
	_request_submitted(req);
	return true;
}


bool Module::try_submit_request(Module_request &req)
{
	bool success { false };
	for_each_channel([&] (Module_channel &chan) {
		if (success)
			return;

		success = chan.try_submit_request(req);
	});
	return success;
}


void Module_composition::execute_modules()
{
	bool progress { true };
	while (progress) {

		progress = false;
		for (Module_id id { 0 }; id <= MAX_MODULE_ID; id++) {
			if (!_module_ptrs[id])
				continue;

			Module &mod { *_module_ptrs[id] };
			mod.execute(progress);
			mod.for_each_generated_request([&] (Module_request &req) {
				ASSERT(req.dst_module_id() <= MAX_MODULE_ID);
				ASSERT(_module_ptrs[req.dst_module_id()]);
				Module &dst_module { *_module_ptrs[req.dst_module_id()] };
				if (dst_module.try_submit_request(req)) {
					if (VERBOSE_MODULE_COMMUNICATION)
						log(module_name(id), " ", req.src_chan_id(), " --", req, "--> ",
						    module_name(req.dst_module_id()), " ", req.dst_chan_id());

					progress = true;
					return true;
				}
				if (VERBOSE_MODULE_COMMUNICATION)
					log(module_name(id), " ", req.src_chan_id(), " --", req, "-| ", module_name(req.dst_module_id()));

				return false;
			});
			mod.for_each_completed_request([&] (Module_request &req) {
				ASSERT(req.src_module_id() <= MAX_MODULE_ID);
				if (VERBOSE_MODULE_COMMUNICATION)
					log(module_name(req.src_module_id()), " ", req.src_chan_id(), " <--", req,
					    "-- ", module_name(id), " ", req.dst_chan_id());

				Module &src_module { *_module_ptrs[req.src_module_id()] };
				src_module.with_channel(req.src_chan_id(), [&] (Module_channel &chan) {
					chan.generated_req_completed(); });
				progress = true;
			});
		}
	};
}


void Module_composition::add_module(Module_id module_id, Module &mod)
{
	ASSERT(module_id <= MAX_MODULE_ID);
	ASSERT(!_module_ptrs[module_id]);
	_module_ptrs[module_id] = &mod;
}


void Module_composition::remove_module(Module_id module_id)
{
	ASSERT(module_id <= MAX_MODULE_ID);
	ASSERT(_module_ptrs[module_id]);
	_module_ptrs[module_id] = nullptr;
}
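A consequence of the routing shown above is that a request object stays at its originating channel for its whole lifetime and is only referenced, never copied, by the target module, which is what allows request types to be non-copyable. The sketch below is meant solely to illustrate that ownership model with invented names (Ping_request, Source, Sink); it is not Tresor code and does not mirror the real Module interfaces.

#include <cstdio>

/* request that cannot be copied, only referenced by the target */
struct Ping_request
{
	bool done { false };

	Ping_request() = default;
	Ping_request(Ping_request const &) = delete;
	Ping_request &operator = (Ping_request const &) = delete;
};

/* target module: remembers a pointer, never copies the request */
struct Sink
{
	Ping_request *pending { nullptr };

	bool try_submit(Ping_request &req)
	{
		if (pending)
			return false;
		pending = &req;
		return true;
	}

	void execute(bool &progress)
	{
		if (!pending)
			return;
		pending->done = true; /* complete the request in place */
		pending = nullptr;
		progress = true;
	}
};

/* source module: owns the request and generates it inline */
struct Source
{
	Ping_request req { };

	void generate(Sink &sink) { (void)sink.try_submit(req); }
};

int main()
{
	Source source { };
	Sink   sink   { };
	source.generate(sink);

	/* drive the sink until no more progress is made */
	for (bool progress = true; progress; ) {
		progress = false;
		sink.execute(progress);
	}
	printf("ping %s\n", source.req.done ? "completed" : "still pending");
	return 0;
}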
@@ -11,9 +11,6 @@
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
/* base includes */
|
||||
#include <base/log.h>
|
||||
|
||||
/* tresor includes */
|
||||
#include <tresor/sb_check.h>
|
||||
#include <tresor/vbd_check.h>
|
||||
@@ -22,487 +19,149 @@
|
||||
|
||||
using namespace Tresor;
|
||||
|
||||
|
||||
/**********************
|
||||
** Sb_check_request **
|
||||
**********************/
|
||||
|
||||
Sb_check_request::Sb_check_request(Module_id src_module_id,
|
||||
Module_request_id src_request_id)
|
||||
Sb_check_request::Sb_check_request(Module_id src_mod, Module_channel_id src_chan, bool &success)
|
||||
:
|
||||
Module_request { src_module_id, src_request_id, SB_CHECK }
|
||||
Module_request { src_mod, src_chan, SB_CHECK }, _success { success }
|
||||
{ }
|
||||
|
||||
|
||||
void Sb_check_request::create(void *buf_ptr,
|
||||
size_t buf_size,
|
||||
uint64_t src_module_id,
|
||||
uint64_t src_request_id,
|
||||
size_t req_type)
|
||||
void Sb_check_channel::_generated_req_completed(State_uint state_uint)
|
||||
{
|
||||
Sb_check_request req { src_module_id, src_request_id };
|
||||
req._type = (Type)req_type;
|
||||
|
||||
if (sizeof(req) > buf_size) {
|
||||
class Bad_size_0 { };
|
||||
throw Bad_size_0 { };
|
||||
if (!_generated_req_success) {
|
||||
error("sb check: request (", *_req_ptr, ") failed because generated request failed)");
|
||||
_req_ptr->_success = false;
|
||||
_state = REQ_COMPLETE;
|
||||
_req_ptr = nullptr;
|
||||
return;
|
||||
}
|
||||
memcpy(buf_ptr, &req, sizeof(req));
|
||||
_state = (State)state_uint;
|
||||
}
|
||||
|
||||
|
||||
char const *Sb_check_request::type_to_string(Type type)
|
||||
void Sb_check_channel::execute(bool &progress)
|
||||
{
|
||||
switch (type) {
|
||||
case INVALID: return "invalid";
|
||||
case CHECK: return "check";
|
||||
}
|
||||
return "?";
|
||||
}
|
||||
if (!_req_ptr)
|
||||
return;
|
||||
|
||||
switch (_state) {
|
||||
case REQ_SUBMITTED:
|
||||
|
||||
/**************
|
||||
** Sb_check **
|
||||
**************/
|
||||
_highest_gen = 0;
|
||||
_highest_gen_sb_idx = 0;
|
||||
_snap_idx = 0;
|
||||
_sb_idx = 0;
|
||||
_scan_for_highest_gen_sb_done = false;
|
||||
_generate_req<Block_io::Read>(READ_BLK_SUCCESSFUL, progress, _sb_idx, _blk);
|
||||
break;
|
||||
|
||||
char const *Sb_check::_state_to_step_label(Channel::Sb_slot_state state)
|
||||
{
|
||||
switch (state) {
|
||||
case Channel::READ_DONE: return "read";
|
||||
case Channel::VBD_CHECK_DONE: return "vbd check";
|
||||
case Channel::FT_CHECK_DONE: return "ft check";
|
||||
case Channel::MT_CHECK_DONE: return "mt check";
|
||||
case READ_BLK_SUCCESSFUL:
|
||||
|
||||
_sb.decode_from_blk(_blk);
|
||||
if (_scan_for_highest_gen_sb_done) {
|
||||
if (!_sb.valid()) {
|
||||
_mark_req_failed(progress, "no valid superblock");
|
||||
break;
|
||||
}
|
||||
Snapshot &snap { _sb.snapshots.items[_snap_idx] };
|
||||
if (snap.valid) {
|
||||
Snapshot &snap { _sb.snapshots.items[_snap_idx] };
|
||||
_tree_root.construct(snap.pba, snap.gen, snap.hash, snap.max_level, _sb.degree, snap.nr_of_leaves);
|
||||
_generate_req<Vbd_check_request>(CHECK_VBD_SUCCESSFUL, progress, *_tree_root);
|
||||
if (VERBOSE_CHECK)
|
||||
log(" check snap ", _snap_idx, " (", snap, ")");
|
||||
} else {
|
||||
_state = CHECK_VBD_SUCCESSFUL;
|
||||
progress = true;
|
||||
if (VERBOSE_CHECK)
|
||||
log(" skip snap ", _snap_idx, " as it is unused");
|
||||
}
|
||||
} else {
|
||||
Snapshot &snap { _sb.curr_snap() };
|
||||
if (_sb.valid() && snap.gen > _highest_gen) {
|
||||
_highest_gen = snap.gen;
|
||||
_highest_gen_sb_idx = _sb_idx;
|
||||
}
|
||||
if (_sb_idx < MAX_SUPERBLOCK_INDEX) {
|
||||
_sb_idx++;
|
||||
_generate_req<Block_io::Read>(READ_BLK_SUCCESSFUL, progress, _sb_idx, _blk);
|
||||
progress = true;
|
||||
} else {
|
||||
_scan_for_highest_gen_sb_done = true;
|
||||
_generate_req<Block_io::Read>(READ_BLK_SUCCESSFUL, progress, _highest_gen_sb_idx, _blk);
|
||||
if (VERBOSE_CHECK)
|
||||
log("check superblock ", _highest_gen_sb_idx, "\n read superblock");
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
case CHECK_VBD_SUCCESSFUL:
|
||||
|
||||
if (_snap_idx < MAX_SNAP_IDX) {
|
||||
_snap_idx++;
|
||||
_state = READ_BLK_SUCCESSFUL;
|
||||
progress = true;
|
||||
} else {
|
||||
_snap_idx = 0;
|
||||
_tree_root.construct(_sb.free_number, _sb.free_gen, _sb.free_hash, _sb.free_max_level, _sb.free_degree, _sb.free_leaves);
|
||||
_generate_req<Ft_check_request>(CHECK_FT_SUCCESSFUL, progress, *_tree_root);
|
||||
if (VERBOSE_CHECK)
|
||||
log(" check free tree");
|
||||
}
|
||||
break;
|
||||
|
||||
case CHECK_FT_SUCCESSFUL:
|
||||
|
||||
_tree_root.construct(_sb.meta_number, _sb.meta_gen, _sb.meta_hash, _sb.meta_max_level, _sb.meta_degree, _sb.meta_leaves);
|
||||
_generate_req<Ft_check_request>(CHECK_MT_SUCCESSFUL, progress, *_tree_root);
|
||||
if (VERBOSE_CHECK)
|
||||
log(" check meta tree");
|
||||
break;
|
||||
|
||||
case CHECK_MT_SUCCESSFUL: _mark_req_successful(progress); break;
|
||||
default: break;
|
||||
}
|
||||
return "?";
|
||||
}
|
||||
|
||||
|
||||
bool Sb_check::_handle_failed_generated_req(Channel &chan,
|
||||
bool &progress)
|
||||
void Sb_check_channel::_mark_req_failed(bool &progress, char const *str)
|
||||
{
|
||||
if (chan._gen_prim_success)
|
||||
return false;
|
||||
|
||||
_mark_req_failed(
|
||||
chan, progress, _state_to_step_label(chan._sb_slot_state));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void Sb_check::_execute_check(Channel &chan,
|
||||
bool &progress)
|
||||
{
|
||||
switch (chan._state) {
|
||||
case Channel::INSPECT_SBS:
|
||||
|
||||
switch (chan._sb_slot_state) {
|
||||
case Channel::INIT:
|
||||
|
||||
chan._sb_slot_state = Channel::READ_STARTED;
|
||||
chan._gen_prim_blk_nr = chan._sb_slot_idx;
|
||||
progress = true;
|
||||
break;
|
||||
|
||||
case Channel::READ_DONE:
|
||||
{
|
||||
if (_handle_failed_generated_req(chan, progress))
|
||||
break;
|
||||
|
||||
Snapshot &snap {
|
||||
chan._sb_slot.snapshots.items[chan._sb_slot.curr_snap] };
|
||||
|
||||
if (chan._sb_slot.valid() &&
|
||||
snap.gen > chan._highest_gen) {
|
||||
|
||||
chan._highest_gen = snap.gen;
|
||||
chan._last_sb_slot_idx = chan._sb_slot_idx;
|
||||
}
|
||||
if (chan._sb_slot_idx < MAX_SUPERBLOCK_INDEX) {
|
||||
|
||||
chan._sb_slot_idx++;
|
||||
chan._sb_slot_state = Channel::INIT;
|
||||
progress = true;
|
||||
|
||||
} else {
|
||||
|
||||
chan._state = Channel::CHECK_SB;
|
||||
chan._sb_slot_idx = chan._last_sb_slot_idx;
|
||||
chan._sb_slot_state = Channel::INIT;
|
||||
progress = true;
|
||||
|
||||
if (VERBOSE_CHECK)
|
||||
log("check superblock ", chan._sb_slot_idx);
|
||||
}
|
||||
break;
|
||||
}
|
||||
default:
|
||||
|
||||
break;
|
||||
}
|
||||
break;
|
||||
|
||||
case Channel::CHECK_SB:
|
||||
|
||||
switch (chan._sb_slot_state) {
|
||||
case Channel::INIT:
|
||||
|
||||
chan._sb_slot_state = Channel::READ_STARTED;
|
||||
chan._gen_prim_blk_nr = chan._sb_slot_idx;
|
||||
progress = true;
|
||||
|
||||
if (VERBOSE_CHECK)
|
||||
log(" read superblock");
|
||||
|
||||
break;
|
||||
|
||||
case Channel::READ_DONE:
|
||||
|
||||
if (_handle_failed_generated_req(chan, progress))
|
||||
break;
|
||||
|
||||
if (chan._sb_slot.valid()) {
|
||||
|
||||
Snapshot &snap {
|
||||
chan._sb_slot.snapshots.items[chan._snap_idx] };
|
||||
|
||||
if (snap.valid) {
|
||||
|
||||
chan._sb_slot_state = Channel::VBD_CHECK_STARTED;
|
||||
chan._gen_prim_blk_nr = snap.pba;
|
||||
progress = true;
|
||||
|
||||
if (VERBOSE_CHECK)
|
||||
log(" check snap ", chan._snap_idx, " (", snap, ")");
|
||||
|
||||
} else {
|
||||
|
||||
chan._sb_slot_state = Channel::VBD_CHECK_DONE;
|
||||
progress = true;
|
||||
|
||||
if (VERBOSE_CHECK)
|
||||
log(" skip snap ", chan._snap_idx,
|
||||
" as it is unused");
|
||||
}
|
||||
} else {
|
||||
|
||||
chan._sb_slot_state = Channel::DONE;
|
||||
progress = true;
|
||||
|
||||
if (VERBOSE_CHECK)
|
||||
log(" skip superblock as it is unused");
|
||||
}
|
||||
break;
|
||||
|
||||
case Channel::VBD_CHECK_DONE:
|
||||
|
||||
if (_handle_failed_generated_req(chan, progress))
|
||||
break;
|
||||
|
||||
if (chan._snap_idx < MAX_SNAP_IDX) {
|
||||
|
||||
chan._snap_idx++;
|
||||
chan._sb_slot_state = Channel::READ_DONE;
|
||||
progress = true;
|
||||
|
||||
} else {
|
||||
|
||||
chan._snap_idx = 0;
|
||||
chan._gen_prim_blk_nr = chan._sb_slot.free_number;
|
||||
chan._sb_slot_state = Channel::FT_CHECK_STARTED;
|
||||
progress = true;
|
||||
|
||||
if (VERBOSE_CHECK)
|
||||
log(" check free tree");
|
||||
}
|
||||
break;
|
||||
|
||||
case Channel::FT_CHECK_DONE:
|
||||
|
||||
if (_handle_failed_generated_req(chan, progress))
|
||||
break;
|
||||
|
||||
chan._sb_slot_state = Channel::MT_CHECK_STARTED;
|
||||
progress = true;
|
||||
|
||||
if (VERBOSE_CHECK)
|
||||
log(" check meta tree");
|
||||
|
||||
break;
|
||||
|
||||
case Channel::MT_CHECK_DONE:
|
||||
|
||||
if (_handle_failed_generated_req(chan, progress))
|
||||
break;
|
||||
|
||||
_mark_req_successful(chan, progress);
|
||||
break;
|
||||
|
||||
case Channel::DONE:
|
||||
|
||||
break;
|
||||
|
||||
default:
|
||||
|
||||
break;
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Sb_check::_mark_req_failed(Channel &chan,
|
||||
bool &progress,
|
||||
char const *str)
|
||||
{
|
||||
error("sb check: request (", chan._request, ") failed at step \"", str, "\"");
|
||||
chan._request._success = false;
|
||||
chan._sb_slot_state = Channel::DONE;
|
||||
error("sb check: request (", *_req_ptr, ") failed at step \"", str, "\"");
|
||||
_req_ptr->_success = false;
|
||||
_state = REQ_COMPLETE;
|
||||
_req_ptr = nullptr;
|
||||
progress = true;
|
||||
}
|
||||
|
||||
|
||||
void Sb_check::_mark_req_successful(Channel &chan,
|
||||
bool &progress)
|
||||
void Sb_check_channel::_mark_req_successful(bool &progress)
|
||||
{
|
||||
Request &req { chan._request };
|
||||
Request &req { *_req_ptr };
|
||||
req._success = true;
|
||||
chan._sb_slot_state = Channel::DONE;
|
||||
_state = REQ_COMPLETE;
|
||||
_req_ptr = nullptr;
|
||||
progress = true;
|
||||
}
|
||||
|
||||
|
||||
bool Sb_check::_peek_completed_request(uint8_t *buf_ptr,
|
||||
size_t buf_size)
|
||||
void Sb_check_channel::_request_submitted(Module_request &mod_req)
|
||||
{
|
||||
for (Channel &channel : _channels) {
|
||||
if (channel._sb_slot_state == Channel::DONE) {
|
||||
if (sizeof(channel._request) > buf_size) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
memcpy(buf_ptr, &channel._request, sizeof(channel._request));
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
_req_ptr = static_cast<Request *>(&mod_req);
|
||||
_state = REQ_SUBMITTED;
|
||||
}
|
||||
|
||||
|
||||
void Sb_check::_drop_completed_request(Module_request &req)
|
||||
Sb_check::Sb_check()
|
||||
{
|
||||
Module_request_id id { 0 };
|
||||
id = req.dst_request_id();
|
||||
if (id >= NR_OF_CHANNELS) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
Module_channel_id id { 0 };
|
||||
for (Constructible<Channel> &chan : _channels) {
|
||||
chan.construct(id++);
|
||||
add_channel(*chan);
|
||||
}
|
||||
if (_channels[id]._sb_slot_state != Channel::DONE) {
|
||||
class Exception_2 { };
|
||||
throw Exception_2 { };
|
||||
}
|
||||
_channels[id]._sb_slot_state = Channel::INACTIVE;
|
||||
}
|
||||
|
||||
|
||||
bool Sb_check::_peek_generated_request(uint8_t *buf_ptr,
|
||||
size_t buf_size)
|
||||
{
|
||||
for (Module_request_id id { 0 }; id < NR_OF_CHANNELS; id++) {
|
||||
|
||||
Channel &chan { _channels[id] };
|
||||
|
||||
if (chan._sb_slot_state == Channel::INACTIVE)
|
||||
continue;
|
||||
|
||||
switch (chan._sb_slot_state) {
|
||||
case Channel::READ_STARTED:
|
||||
|
||||
construct_in_buf<Block_io_request>(
|
||||
buf_ptr, buf_size, SB_CHECK, id,
|
||||
Block_io_request::READ, 0, 0, 0,
|
||||
chan._gen_prim_blk_nr, 0, 1, &chan._encoded_blk,
|
||||
nullptr);
|
||||
|
||||
return true;
|
||||
|
||||
case Channel::VBD_CHECK_STARTED:
|
||||
{
|
||||
Snapshot const &snap {
|
||||
chan._sb_slot.snapshots.items[chan._snap_idx] };
|
||||
|
||||
construct_in_buf<Vbd_check_request>(
|
||||
buf_ptr, buf_size, SB_CHECK, id,
|
||||
Vbd_check_request::CHECK, snap.max_level,
|
||||
chan._sb_slot.degree - 1,
|
||||
snap.nr_of_leaves,
|
||||
Type_1_node { snap.pba, snap.gen, snap.hash });
|
||||
|
||||
return true;
|
||||
}
|
||||
case Channel::FT_CHECK_STARTED:
|
||||
|
||||
construct_in_buf<Ft_check_request>(
|
||||
buf_ptr, buf_size, SB_CHECK, id,
|
||||
Ft_check_request::CHECK,
|
||||
(Tree_level_index)chan._sb_slot.free_max_level,
|
||||
(Tree_degree)chan._sb_slot.free_degree - 1,
|
||||
(Number_of_leaves)chan._sb_slot.free_leaves,
|
||||
Type_1_node {
|
||||
chan._sb_slot.free_number,
|
||||
chan._sb_slot.free_gen,
|
||||
chan._sb_slot.free_hash });
|
||||
|
||||
return true;
|
||||
|
||||
case Channel::MT_CHECK_STARTED:
|
||||
|
||||
construct_in_buf<Ft_check_request>(
|
||||
buf_ptr, buf_size, SB_CHECK, id,
|
||||
Ft_check_request::CHECK,
|
||||
(Tree_level_index)chan._sb_slot.meta_max_level,
|
||||
(Tree_degree)chan._sb_slot.meta_degree - 1,
|
||||
(Number_of_leaves)chan._sb_slot.meta_leaves,
|
||||
Type_1_node {
|
||||
chan._sb_slot.meta_number,
|
||||
chan._sb_slot.meta_gen,
|
||||
chan._sb_slot.meta_hash });
|
||||
|
||||
return true;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
void Sb_check::_drop_generated_request(Module_request &req)
|
||||
{
|
||||
Module_request_id const id { req.src_request_id() };
|
||||
if (id >= NR_OF_CHANNELS) {
|
||||
class Exception_0 { };
|
||||
throw Exception_0 { };
|
||||
}
|
||||
Channel &chan { _channels[id] };
|
||||
switch (chan._sb_slot_state) {
|
||||
case Channel::READ_STARTED: chan._sb_slot_state = Channel::READ_DROPPED; break;
|
||||
case Channel::VBD_CHECK_STARTED: chan._sb_slot_state = Channel::VBD_CHECK_DROPPED; break;
|
||||
case Channel::FT_CHECK_STARTED: chan._sb_slot_state = Channel::FT_CHECK_DROPPED; break;
|
||||
case Channel::MT_CHECK_STARTED: chan._sb_slot_state = Channel::MT_CHECK_DROPPED; break;
|
||||
default:
|
||||
class Exception_4 { };
|
||||
throw Exception_4 { };
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Sb_check::generated_request_complete(Module_request &mod_req)
|
||||
{
|
||||
Module_request_id const id { mod_req.src_request_id() };
|
||||
if (id >= NR_OF_CHANNELS) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
Channel &chan { _channels[id] };
|
||||
switch (mod_req.dst_module_id()) {
|
||||
case BLOCK_IO:
|
||||
{
|
||||
Block_io_request &gen_req { *static_cast<Block_io_request*>(&mod_req) };
|
||||
chan._gen_prim_success = gen_req.success();
|
||||
switch (chan._sb_slot_state) {
|
||||
case Channel::READ_DROPPED:
|
||||
chan._sb_slot.decode_from_blk(chan._encoded_blk);
|
||||
chan._sb_slot_state = Channel::READ_DONE;
|
||||
break;
|
||||
default:
|
||||
class Exception_2 { };
|
||||
throw Exception_2 { };
|
||||
}
|
||||
break;
|
||||
}
|
||||
case VBD_CHECK:
|
||||
{
|
||||
Vbd_check_request &gen_req { *static_cast<Vbd_check_request*>(&mod_req) };
|
||||
chan._gen_prim_success = gen_req.success();
|
||||
switch (chan._sb_slot_state) {
|
||||
case Channel::VBD_CHECK_DROPPED: chan._sb_slot_state = Channel::VBD_CHECK_DONE; break;
|
||||
default:
|
||||
class Exception_3 { };
|
||||
throw Exception_3 { };
|
||||
}
|
||||
break;
|
||||
}
|
||||
case FT_CHECK:
|
||||
{
|
||||
Ft_check_request &gen_req { *static_cast<Ft_check_request*>(&mod_req) };
|
||||
chan._gen_prim_success = gen_req.success();
|
||||
switch (chan._sb_slot_state) {
|
||||
case Channel::FT_CHECK_DROPPED: chan._sb_slot_state = Channel::FT_CHECK_DONE; break;
|
||||
case Channel::MT_CHECK_DROPPED: chan._sb_slot_state = Channel::MT_CHECK_DONE; break;
|
||||
default:
|
||||
class Exception_3 { };
|
||||
throw Exception_3 { };
|
||||
}
|
||||
break;
|
||||
}
|
||||
default:
|
||||
class Exception_8 { };
|
||||
throw Exception_8 { };
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
bool Sb_check::ready_to_submit_request()
|
||||
{
|
||||
for (Channel &chan : _channels) {
|
||||
if (chan._sb_slot_state == Channel::INACTIVE)
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
void Sb_check::submit_request(Module_request &req)
|
||||
{
|
||||
for (Module_request_id id { 0 }; id < NR_OF_CHANNELS; id++) {
|
||||
Channel &chan { _channels[id] };
|
||||
if (chan._sb_slot_state == Channel::INACTIVE) {
|
||||
req.dst_request_id(id);
|
||||
chan = Channel { };
|
||||
chan._request = *static_cast<Request *>(&req);
|
||||
chan._sb_slot_state = Channel::INIT;
|
||||
return;
|
||||
}
|
||||
}
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
|
||||
|
||||
void Sb_check::execute(bool &progress)
|
||||
{
|
||||
for (Channel &chan : _channels) {
|
||||
|
||||
if (chan._sb_slot_state == Channel::INACTIVE)
|
||||
continue;
|
||||
|
||||
Request &req { chan._request };
|
||||
switch (req._type) {
|
||||
case Request::CHECK:
|
||||
|
||||
_execute_check(chan, progress);
|
||||
break;
|
||||
|
||||
default:
|
||||
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
}
|
||||
for_each_channel<Channel>([&] (Channel &chan) {
|
||||
chan.execute(progress); });
|
||||
}
|
||||
|
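The REQ_SUBMITTED and READ_BLK_SUCCESSFUL states above first scan all superblock slots for the one whose current snapshot has the highest generation and only then start checking that superblock's snapshots, free tree, and meta tree. A condensed, stand-alone version of that scan could look like the following; NUM_SLOTS and generation_of_slot are placeholders for illustration, not part of the Tresor interface, and here an invalid slot is assumed to report generation 0.

#include <cstdint>
#include <cstdio>

enum { NUM_SLOTS = 8 }; /* placeholder for the number of superblock slots */

/*
 * Return the index of the slot with the highest generation, scanning each
 * slot exactly once; a slot reporting generation 0 never wins against a
 * valid slot with a non-zero generation.
 */
template <typename FN>
unsigned slot_with_highest_generation(FN const &generation_of_slot)
{
	uint64_t highest_gen { 0 };
	unsigned highest_idx { 0 };
	for (unsigned idx = 0; idx < NUM_SLOTS; idx++) {
		uint64_t const gen = generation_of_slot(idx);
		if (gen > highest_gen) {
			highest_gen = gen;
			highest_idx = idx;
		}
	}
	return highest_idx;
}

int main()
{
	/* toy generations, slot 5 holds the most recent superblock */
	uint64_t const gens[NUM_SLOTS] { 3, 7, 0, 2, 1, 9, 4, 8 };
	unsigned const idx =
		slot_with_highest_generation([&] (unsigned i) { return gens[i]; });
	printf("check superblock %u\n", idx);
	return 0;
}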
@@ -1,5 +1,6 @@
|
||||
/*
|
||||
* \brief Module for initializing the superblocks of a new Tresor
|
||||
* \author Martin Stein
|
||||
* \author Josef Soentgen
|
||||
* \date 2023-03-14
|
||||
*/
|
||||
@@ -11,12 +12,8 @@
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
/* base includes */
|
||||
#include <base/log.h>
|
||||
|
||||
/* tresor includes */
|
||||
#include <tresor/sha256_4k_hash.h>
|
||||
#include <tresor/block_allocator.h>
|
||||
#include <tresor/hash.h>
|
||||
#include <tresor/block_io.h>
|
||||
#include <tresor/vbd_initializer.h>
|
||||
#include <tresor/ft_initializer.h>
|
||||
@@ -25,701 +22,150 @@
|
||||
|
||||
using namespace Tresor;
|
||||
|
||||
|
||||
Sb_initializer_request::Sb_initializer_request(Module_id src_module_id,
|
||||
Module_request_id src_request_id)
|
||||
Sb_initializer_request::
|
||||
Sb_initializer_request(Module_id src_mod, Module_channel_id src_chan, Tree_level_index vbd_max_lvl,
|
||||
Tree_degree vbd_degree, Number_of_leaves vbd_num_leaves, Tree_level_index ft_max_lvl,
|
||||
Tree_degree ft_degree, Number_of_leaves ft_num_leaves, Tree_level_index mt_max_lvl,
|
||||
Tree_degree mt_degree, Number_of_leaves mt_num_leaves, Pba_allocator &pba_alloc, bool &success)
|
||||
:
|
||||
Module_request { src_module_id, src_request_id, SB_INITIALIZER }
|
||||
Module_request { src_mod, src_chan, SB_INITIALIZER }, _vbd_max_lvl { vbd_max_lvl },
|
||||
_vbd_degree { vbd_degree }, _vbd_num_leaves { vbd_num_leaves }, _ft_max_lvl { ft_max_lvl },
|
||||
_ft_degree { ft_degree }, _ft_num_leaves { ft_num_leaves }, _mt_max_lvl { mt_max_lvl },
|
||||
_mt_degree { mt_degree }, _mt_num_leaves { mt_num_leaves }, _pba_alloc { pba_alloc }, _success { success }
|
||||
{ }
|
||||
|
||||
|
||||
void Sb_initializer_request::create(void *buf_ptr,
|
||||
size_t buf_size,
|
||||
uint64_t src_module_id,
|
||||
uint64_t src_request_id,
|
||||
size_t req_type,
|
||||
Tree_level_index vbd_max_level_idx,
|
||||
Tree_degree vbd_max_child_idx,
|
||||
Number_of_leaves vbd_nr_of_leaves,
|
||||
Tree_level_index ft_max_level_idx,
|
||||
Tree_degree ft_max_child_idx,
|
||||
Number_of_leaves ft_nr_of_leaves,
|
||||
Tree_level_index mt_max_level_idx,
|
||||
Tree_degree mt_max_child_idx,
|
||||
Number_of_leaves mt_nr_of_leaves)
|
||||
void Sb_initializer_channel::_generated_req_completed(State_uint state_uint)
|
||||
{
|
||||
Sb_initializer_request req { src_module_id, src_request_id };
|
||||
|
||||
req._type = (Type)req_type;
|
||||
req._vbd_max_level_idx = vbd_max_level_idx;
|
||||
req._vbd_max_child_idx = vbd_max_child_idx;
|
||||
req._vbd_nr_of_leaves = vbd_nr_of_leaves;
|
||||
req._ft_max_level_idx = ft_max_level_idx;
|
||||
req._ft_max_child_idx = ft_max_child_idx;
|
||||
req._ft_nr_of_leaves = ft_nr_of_leaves;
|
||||
req._mt_max_level_idx = mt_max_level_idx;
|
||||
req._mt_max_child_idx = mt_max_child_idx;
|
||||
req._mt_nr_of_leaves = mt_nr_of_leaves;
|
||||
|
||||
if (sizeof(req) > buf_size) {
|
||||
class Bad_size_0 { };
|
||||
throw Bad_size_0 { };
|
||||
}
|
||||
memcpy(buf_ptr, &req, sizeof(req));
|
||||
}
|
||||
|
||||
|
||||
char const *Sb_initializer_request::type_to_string(Type type)
|
||||
{
|
||||
switch (type) {
|
||||
case INVALID: return "invalid";
|
||||
case INIT: return "init";
|
||||
}
|
||||
return "?";
|
||||
}
|
||||
|
||||
|
||||
void Sb_initializer::_populate_sb_slot(Channel &channel,
|
||||
Physical_block_address first,
|
||||
Number_of_blocks num)
|
||||
{
|
||||
Superblock &sb = channel._sb;
|
||||
|
||||
Request const &req = channel._request;
|
||||
Type_1_node const &vbd_node = channel._vbd_node;
|
||||
Type_1_node const &ft_node = channel._ft_node;
|
||||
Type_1_node const &mt_node = channel._mt_node;
|
||||
|
||||
sb.state = Superblock::NORMAL;
|
||||
sb.snapshots.items[0] = Snapshot {
|
||||
.hash = vbd_node.hash,
|
||||
.pba = vbd_node.pba,
|
||||
.gen = 0,
|
||||
.nr_of_leaves = req._vbd_nr_of_leaves,
|
||||
.max_level = req._vbd_max_level_idx,
|
||||
.valid = true,
|
||||
.id = 0,
|
||||
.keep = false
|
||||
};
|
||||
|
||||
sb.rekeying_vba = 0;
|
||||
sb.resizing_nr_of_pbas = 0;
|
||||
sb.resizing_nr_of_leaves = 0;
|
||||
memset(&sb.previous_key, 0, sizeof(sb.previous_key));
|
||||
sb.current_key = channel._key_cipher;
|
||||
sb.curr_snap = 0;
|
||||
sb.degree = req._vbd_max_child_idx;
|
||||
sb.first_pba = first;
|
||||
sb.nr_of_pbas = num;
|
||||
sb.last_secured_generation = 0;
|
||||
sb.free_gen = 0;
|
||||
sb.free_number = ft_node.pba;
|
||||
sb.free_hash = ft_node.hash;
|
||||
sb.free_max_level = req._ft_max_level_idx;
|
||||
sb.free_degree = req._ft_max_child_idx;
|
||||
sb.free_leaves = req._ft_nr_of_leaves;
|
||||
sb.meta_gen = 0;
|
||||
sb.meta_number = mt_node.pba;
|
||||
sb.meta_hash = mt_node.hash;
|
||||
sb.meta_max_level = req._mt_max_level_idx;
|
||||
sb.meta_degree = req._mt_max_child_idx;
|
||||
sb.meta_leaves = req._mt_nr_of_leaves;
|
||||
}
|
||||
|
||||
|
||||
extern uint64_t block_allocator_first_block();
|
||||
extern uint64_t block_allocator_nr_of_blks();
|
||||
|
||||
|
||||
void Sb_initializer::_execute(Channel &channel,
|
||||
bool &progress)
|
||||
{
|
||||
|
||||
using CS = Channel::State;
|
||||
|
||||
switch (channel._state) {
|
||||
case CS::IN_PROGRESS:
|
||||
|
||||
if (channel._sb_slot_index == 0) {
|
||||
channel._state = CS::VBD_REQUEST_PENDING;
|
||||
} else {
|
||||
channel._sb.encode_to_blk(channel._encoded_blk);
|
||||
channel._state = CS::WRITE_REQUEST_PENDING;
|
||||
}
|
||||
progress = true;
|
||||
break;
|
||||
|
||||
case CS::VBD_REQUEST_COMPLETE:
|
||||
|
||||
channel._state = CS::FT_REQUEST_PENDING;
|
||||
progress = true;
|
||||
break;
|
||||
|
||||
case CS::FT_REQUEST_COMPLETE:
|
||||
|
||||
channel._state = CS::MT_REQUEST_PENDING;
|
||||
progress = true;
|
||||
break;
|
||||
|
||||
case CS::MT_REQUEST_COMPLETE:
|
||||
|
||||
channel._state = CS::TA_REQUEST_CREATE_KEY_PENDING;
|
||||
progress = true;
|
||||
break;
|
||||
|
||||
case CS::TA_REQUEST_CREATE_KEY_COMPLETE:
|
||||
|
||||
channel._state = CS::TA_REQUEST_ENCRYPT_KEY_PENDING;
|
||||
progress = true;
|
||||
break;
|
||||
|
||||
case CS::TA_REQUEST_ENCRYPT_KEY_COMPLETE:
|
||||
|
||||
_populate_sb_slot(channel,
|
||||
Physical_block_address { block_allocator_first_block() } - NR_OF_SUPERBLOCK_SLOTS,
|
||||
Number_of_blocks { (uint32_t)block_allocator_nr_of_blks() + NR_OF_SUPERBLOCK_SLOTS });
|
||||
|
||||
channel._sb.encode_to_blk(channel._encoded_blk);
|
||||
calc_sha256_4k_hash(channel._encoded_blk, channel._sb_hash);
|
||||
|
||||
channel._state = CS::WRITE_REQUEST_PENDING;
|
||||
progress = true;
|
||||
break;
|
||||
|
||||
case CS::WRITE_REQUEST_COMPLETE:
|
||||
|
||||
channel._state = CS::SYNC_REQUEST_PENDING;
|
||||
progress = true;
|
||||
break;
|
||||
|
||||
case CS::SYNC_REQUEST_COMPLETE:
|
||||
|
||||
if (channel._sb_slot_index == 0) {
|
||||
channel._state = CS::TA_REQUEST_SECURE_SB_PENDING;
|
||||
} else {
|
||||
channel._state = CS::SLOT_COMPLETE;
|
||||
}
|
||||
progress = true;
|
||||
break;
|
||||
|
||||
case CS::TA_REQUEST_SECURE_SB_COMPLETE:
|
||||
|
||||
channel._state = CS::SLOT_COMPLETE;
|
||||
progress = true;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
/* finished */
|
||||
if (channel._sb_slot_index == NR_OF_SUPERBLOCK_SLOTS)
|
||||
_mark_req_successful(channel, progress);
|
||||
}
|
||||
|
||||
|
||||
void Sb_initializer::_execute_init(Channel &channel,
|
||||
bool &progress)
|
||||
{
|
||||
using CS = Channel::State;
|
||||
|
||||
switch (channel._state) {
|
||||
case CS::SUBMITTED:
|
||||
|
||||
/*
|
||||
* Reset the index on every new job as it is
|
||||
* indicator for a finished job.
|
||||
*/
|
||||
channel._sb_slot_index = 0;
|
||||
|
||||
channel._state = Channel::PENDING;
|
||||
progress = true;
|
||||
return;
|
||||
|
||||
case CS::PENDING:
|
||||
|
||||
/*
|
||||
* Remove residual data here as we will end up
|
||||
* here for every SB slot.
|
||||
*/
|
||||
channel.clean_data();
|
||||
|
||||
channel._state = Channel::IN_PROGRESS;
|
||||
progress = true;
|
||||
return;
|
||||
|
||||
case CS::IN_PROGRESS:
|
||||
|
||||
_execute(channel, progress);
|
||||
return;
|
||||
|
||||
case CS::SLOT_COMPLETE:
|
||||
|
||||
if (channel._sb_slot_index < NR_OF_SUPERBLOCK_SLOTS) {
|
||||
++channel._sb_slot_index;
|
||||
channel._state = Channel::PENDING;
|
||||
progress = true;
|
||||
}
|
||||
return;
|
||||
|
||||
case CS::FT_REQUEST_COMPLETE:
|
||||
|
||||
_execute(channel, progress);
|
||||
return;
|
||||
|
||||
case CS::MT_REQUEST_COMPLETE:
|
||||
|
||||
_execute(channel, progress);
|
||||
return;
|
||||
|
||||
case CS::VBD_REQUEST_COMPLETE:
|
||||
|
||||
_execute(channel, progress);
|
||||
return;
|
||||
|
||||
case CS::SYNC_REQUEST_COMPLETE:
|
||||
|
||||
_execute(channel, progress);
|
||||
return;
|
||||
|
||||
case CS::TA_REQUEST_CREATE_KEY_COMPLETE:
|
||||
|
||||
_execute(channel, progress);
|
||||
return;
|
||||
|
||||
case CS::TA_REQUEST_ENCRYPT_KEY_COMPLETE:
|
||||
|
||||
_execute(channel, progress);
|
||||
return;
|
||||
|
||||
case CS::TA_REQUEST_SECURE_SB_COMPLETE:
|
||||
|
||||
_execute(channel, progress);
|
||||
return;
|
||||
|
||||
case CS::WRITE_REQUEST_COMPLETE:
|
||||
|
||||
_execute(channel, progress);
|
||||
return;
|
||||
|
||||
default:
|
||||
/*
|
||||
* Omit other states related to FT/MT/VBD as
|
||||
* those are handled via Module API.
|
||||
*/
|
||||
if (!_generated_req_success) {
|
||||
error("sb initializer: request (", *_req_ptr, ") failed because generated request failed)");
|
||||
_req_ptr->_success = false;
|
||||
_state = REQ_COMPLETE;
|
||||
_req_ptr = nullptr;
|
||||
return;
|
||||
}
|
||||
_state = (State)state_uint;
|
||||
}
|
||||
|
||||
|
||||
void Sb_initializer::_mark_req_failed(Channel &channel,
|
||||
bool &progress,
|
||||
char const *str)
|
||||
void Sb_initializer_channel::_request_submitted(Module_request &mod_req)
|
||||
{
|
||||
error("request failed: failed to ", str);
|
||||
channel._request._success = false;
|
||||
channel._state = Channel::COMPLETE;
|
||||
_req_ptr = static_cast<Request *>(&mod_req);
|
||||
_state = REQ_SUBMITTED;
|
||||
}
|
||||
|
||||
|
||||
void Sb_initializer_channel::_mark_req_successful(bool &progress)
|
||||
{
|
||||
_req_ptr->_success = true;
|
||||
_state = REQ_COMPLETE;
|
||||
_req_ptr = nullptr;
|
||||
progress = true;
|
||||
}
|
||||
|
||||
|
||||
void Sb_initializer::_mark_req_successful(Channel &channel,
|
||||
bool &progress)
|
||||
void Sb_initializer_channel::execute(bool &progress)
|
||||
{
|
||||
Request &req { channel._request };
|
||||
if (!_req_ptr)
|
||||
return;
|
||||
|
||||
req._success = true;
|
||||
|
||||
channel._state = Channel::COMPLETE;
|
||||
progress = true;
|
||||
}
|
||||
|
||||
|
||||
bool Sb_initializer::_peek_completed_request(uint8_t *buf_ptr,
|
||||
size_t buf_size)
|
||||
{
|
||||
for (Channel &channel : _channels) {
|
||||
if (channel._state == Channel::COMPLETE) {
|
||||
if (sizeof(channel._request) > buf_size) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
memcpy(buf_ptr, &channel._request, sizeof(channel._request));
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
void Sb_initializer::_drop_completed_request(Module_request &req)
|
||||
{
|
||||
Module_request_id id { 0 };
|
||||
id = req.dst_request_id();
|
||||
if (id >= NR_OF_CHANNELS) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
if (_channels[id]._state != Channel::COMPLETE) {
|
||||
class Exception_2 { };
|
||||
throw Exception_2 { };
|
||||
}
|
||||
_channels[id]._state = Channel::INACTIVE;
|
||||
}
|
||||
|
||||
|
||||
bool Sb_initializer::_peek_generated_request(uint8_t *buf_ptr,
|
||||
size_t buf_size)
|
||||
{
|
||||
using CS = Channel::State;
|
||||
|
||||
for (Module_request_id id { 0 }; id < NR_OF_CHANNELS; id++) {
|
||||
|
||||
Channel const &channel { _channels[id] };
|
||||
|
||||
if (channel._state == CS::INACTIVE)
|
||||
continue;
|
||||
|
||||
switch (channel._state) {
|
||||
case CS::VBD_REQUEST_PENDING:
|
||||
{
|
||||
Vbd_initializer_request::Type const vbd_initializer_req_type {
|
||||
Vbd_initializer_request::INIT };
|
||||
|
||||
Vbd_initializer_request::create(
|
||||
buf_ptr, buf_size, SB_INITIALIZER, id,
|
||||
vbd_initializer_req_type,
|
||||
channel._request._vbd_max_level_idx,
|
||||
channel._request._vbd_max_child_idx - 1,
|
||||
channel._request._vbd_nr_of_leaves);
|
||||
|
||||
return true;
|
||||
}
|
||||
case CS::FT_REQUEST_PENDING:
|
||||
{
|
||||
Ft_initializer_request::Type const ft_initializer_req_type {
|
||||
Ft_initializer_request::INIT };
|
||||
|
||||
Ft_initializer_request::create(
|
||||
buf_ptr, buf_size, SB_INITIALIZER, id,
|
||||
ft_initializer_req_type,
|
||||
channel._request._ft_max_level_idx,
|
||||
channel._request._ft_max_child_idx - 1,
|
||||
channel._request._ft_nr_of_leaves);
|
||||
|
||||
return true;
|
||||
}
|
||||
case CS::MT_REQUEST_PENDING:
|
||||
{
|
||||
Ft_initializer_request::Type const ft_initializer_req_type {
|
||||
Ft_initializer_request::INIT };
|
||||
|
||||
Ft_initializer_request::create(
|
||||
buf_ptr, buf_size, SB_INITIALIZER, id,
|
||||
ft_initializer_req_type,
|
||||
channel._request._ft_max_level_idx,
|
||||
channel._request._ft_max_child_idx - 1,
|
||||
channel._request._ft_nr_of_leaves);
|
||||
|
||||
return true;
|
||||
}
|
||||
case CS::WRITE_REQUEST_PENDING:
|
||||
{
|
||||
Block_io_request::Type const block_io_req_type {
|
||||
Block_io_request::WRITE };
|
||||
|
||||
construct_in_buf<Block_io_request>(
|
||||
buf_ptr, buf_size, SB_INITIALIZER, id, block_io_req_type, 0, 0,
|
||||
0, channel._sb_slot_index, 0, 1, (void *)&channel._encoded_blk,
|
||||
nullptr);
|
||||
|
||||
return true;
|
||||
}
|
||||
case CS::SYNC_REQUEST_PENDING:
|
||||
{
|
||||
Block_io_request::Type const block_io_req_type {
|
||||
Block_io_request::SYNC };
|
||||
|
||||
construct_in_buf<Block_io_request>(
|
||||
buf_ptr, buf_size, SB_INITIALIZER, id,
|
||||
block_io_req_type, 0, 0, 0,
|
||||
channel._sb_slot_index, 0,
|
||||
0, nullptr, nullptr);
|
||||
|
||||
return true;
|
||||
}
|
||||
case CS::TA_REQUEST_CREATE_KEY_PENDING:
|
||||
{
|
||||
Trust_anchor_request::Type const trust_anchor_req_type {
|
||||
Trust_anchor_request::CREATE_KEY };
|
||||
|
||||
Trust_anchor_request::create(
|
||||
buf_ptr, buf_size, SB_INITIALIZER, id,
|
||||
trust_anchor_req_type,
|
||||
nullptr, nullptr, nullptr, nullptr);
|
||||
|
||||
return true;
|
||||
}
|
||||
case CS::TA_REQUEST_ENCRYPT_KEY_PENDING:
|
||||
{
|
||||
Trust_anchor_request::Type const trust_anchor_req_type {
|
||||
Trust_anchor_request::ENCRYPT_KEY };
|
||||
|
||||
Trust_anchor_request::create(
|
||||
buf_ptr, buf_size, SB_INITIALIZER, id,
|
||||
trust_anchor_req_type,
|
||||
(void*)&channel._key_plain.value,
|
||||
nullptr, nullptr, nullptr);
|
||||
|
||||
return true;
|
||||
}
|
||||
case CS::TA_REQUEST_SECURE_SB_PENDING:
|
||||
{
|
||||
Trust_anchor_request::Type const trust_anchor_req_type {
|
||||
Trust_anchor_request::SECURE_SUPERBLOCK };
|
||||
|
||||
Trust_anchor_request::create(
|
||||
buf_ptr, buf_size, SB_INITIALIZER, id,
|
||||
trust_anchor_req_type,
|
||||
nullptr, nullptr, nullptr,
|
||||
(void*)&channel._sb_hash);
|
||||
|
||||
return true;
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
void Sb_initializer::_drop_generated_request(Module_request &req)
|
||||
{
|
||||
Module_request_id const id { req.src_request_id() };
|
||||
if (id >= NR_OF_CHANNELS) {
|
||||
class Bad_id { };
|
||||
throw Bad_id { };
|
||||
}
|
||||
switch (_channels[id]._state) {
|
||||
case Channel::VBD_REQUEST_PENDING:
|
||||
_channels[id]._state = Channel::VBD_REQUEST_IN_PROGRESS;
|
||||
break;
|
||||
case Channel::FT_REQUEST_PENDING:
|
||||
_channels[id]._state = Channel::FT_REQUEST_IN_PROGRESS;
|
||||
break;
|
||||
case Channel::MT_REQUEST_PENDING:
|
||||
_channels[id]._state = Channel::MT_REQUEST_IN_PROGRESS;
|
||||
break;
|
||||
case Channel::WRITE_REQUEST_PENDING:
|
||||
_channels[id]._state = Channel::WRITE_REQUEST_IN_PROGRESS;
|
||||
break;
|
||||
case Channel::SYNC_REQUEST_PENDING:
|
||||
_channels[id]._state = Channel::SYNC_REQUEST_IN_PROGRESS;
|
||||
break;
|
||||
case Channel::TA_REQUEST_CREATE_KEY_PENDING:
|
||||
_channels[id]._state = Channel::TA_REQUEST_CREATE_KEY_IN_PROGRESS;
|
||||
break;
|
||||
case Channel::TA_REQUEST_ENCRYPT_KEY_PENDING:
|
||||
_channels[id]._state = Channel::TA_REQUEST_ENCRYPT_KEY_IN_PROGRESS;
|
||||
break;
|
||||
case Channel::TA_REQUEST_SECURE_SB_PENDING:
|
||||
_channels[id]._state = Channel::TA_REQUEST_SECURE_SB_IN_PROGRESS;
|
||||
break;
|
||||
default:
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Sb_initializer::generated_request_complete(Module_request &req)
|
||||
{
|
||||
Module_request_id const id { req.src_request_id() };
|
||||
if (id >= NR_OF_CHANNELS) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
Channel &channel = _channels[id];
|
||||
|
||||
switch (channel._state) {
|
||||
case Channel::VBD_REQUEST_IN_PROGRESS:
|
||||
Request &req { *_req_ptr };
|
||||
switch (_state) {
|
||||
case REQ_SUBMITTED:
|
||||
{
|
||||
if (req.dst_module_id() != VBD_INITIALIZER) {
|
||||
class Exception_3 { };
|
||||
throw Exception_3 { };
|
||||
}
|
||||
Vbd_initializer_request const *vbd_initializer_req = static_cast<Vbd_initializer_request const*>(&req);
|
||||
channel._state = Channel::VBD_REQUEST_COMPLETE;
|
||||
channel._generated_req_success = vbd_initializer_req->success();
|
||||
memcpy(&channel._vbd_node,
|
||||
const_cast<Vbd_initializer_request*>(vbd_initializer_req)->root_node(),
|
||||
sizeof(Type_1_node));
|
||||
|
||||
_sb_idx = 0;
|
||||
_sb = { };
|
||||
Snapshot &snap = _sb.snapshots.items[0];
|
||||
_vbd.construct(snap.pba, snap.gen, snap.hash, req._vbd_max_lvl, req._vbd_degree, req._vbd_num_leaves);
|
||||
_generate_req<Vbd_initializer_request>(INIT_VBD_SUCCEEDED, progress, *_vbd, req._pba_alloc);
|
||||
progress = true;
|
||||
break;
|
||||
}
|
||||
case Channel::FT_REQUEST_IN_PROGRESS:
|
||||
case INIT_VBD_SUCCEEDED:
|
||||
|
||||
_ft.construct(_sb.free_number, _sb.free_gen, _sb.free_hash, req._ft_max_lvl, req._ft_degree, req._ft_num_leaves);
|
||||
_generate_req<Ft_initializer_request>(INIT_FT_SUCCEEDED, progress, *_ft, req._pba_alloc);
|
||||
break;
|
||||
|
||||
case INIT_FT_SUCCEEDED:
|
||||
|
||||
_mt.construct(_sb.meta_number, _sb.meta_gen, _sb.meta_hash, req._ft_max_lvl, req._ft_degree, req._ft_num_leaves);
|
||||
_generate_req<Ft_initializer_request>(INIT_MT_SUCCEEDED, progress, *_mt, req._pba_alloc);
|
||||
break;
|
||||
|
||||
case INIT_MT_SUCCEEDED:
|
||||
|
||||
_generate_req<Trust_anchor::Create_key>(CREATE_KEY_SUCCEEDED, progress, _sb.current_key.value);
|
||||
break;
|
||||
|
||||
case CREATE_KEY_SUCCEEDED:
|
||||
|
||||
_generate_req<Trust_anchor::Encrypt_key>(ENCRYPT_KEY_SUCCEEDED, progress, _sb.current_key.value, _sb.current_key.value);
|
||||
break;
|
||||
|
||||
case ENCRYPT_KEY_SUCCEEDED:
|
||||
{
|
||||
if (req.dst_module_id() != FT_INITIALIZER) {
|
||||
class Exception_4 { };
|
||||
throw Exception_4 { };
|
||||
}
|
||||
Ft_initializer_request const *ft_initializer_req = static_cast<Ft_initializer_request const*>(&req);
|
||||
channel._state = Channel::FT_REQUEST_COMPLETE;
|
||||
channel._generated_req_success = ft_initializer_req->success();
|
||||
memcpy(&channel._ft_node,
|
||||
const_cast<Ft_initializer_request*>(ft_initializer_req)->root_node(),
|
||||
sizeof(Type_1_node));
|
||||
|
||||
Snapshot &snap = _sb.snapshots.items[0];
|
||||
snap.gen = 0;
|
||||
snap.nr_of_leaves = req._vbd_num_leaves;
|
||||
snap.max_level = req._vbd_max_lvl;
|
||||
snap.valid = true;
|
||||
snap.id = 0;
|
||||
_sb.current_key.id = 1;
|
||||
_sb.state = Superblock::NORMAL;
|
||||
_sb.degree = req._vbd_degree;
|
||||
_sb.first_pba = req._pba_alloc.first_pba() - NR_OF_SUPERBLOCK_SLOTS;
|
||||
_sb.nr_of_pbas = req._pba_alloc.num_used_pbas() + NR_OF_SUPERBLOCK_SLOTS;
|
||||
_sb.free_max_level = _ft->max_lvl;
|
||||
_sb.free_degree = _ft->degree;
|
||||
_sb.free_leaves = _ft->num_leaves;
|
||||
_sb.meta_max_level = _mt->max_lvl;
|
||||
_sb.meta_degree = _mt->degree;
|
||||
_sb.meta_leaves = _mt->num_leaves;
|
||||
_sb.encode_to_blk(_blk);
|
||||
_generate_req<Block_io::Write>(WRITE_BLK_SUCCEEDED, progress, _sb_idx, _blk);
|
||||
break;
|
||||
}
|
||||
case Channel::MT_REQUEST_IN_PROGRESS:
|
||||
{
|
||||
if (req.dst_module_id() != FT_INITIALIZER) {
|
||||
class Exception_5 { };
|
||||
throw Exception_5 { };
|
||||
}
|
||||
channel._state = Channel::MT_REQUEST_COMPLETE;
|
||||
Ft_initializer_request const *ft_initializer_req = static_cast<Ft_initializer_request const*>(&req);
|
||||
case WRITE_BLK_SUCCEEDED:
|
||||
|
||||
memcpy(&channel._mt_node,
|
||||
const_cast<Ft_initializer_request*>(ft_initializer_req)->root_node(),
|
||||
sizeof(Type_1_node));
|
||||
|
||||
channel._generated_req_success =
|
||||
ft_initializer_req->success();
|
||||
_generate_req<Block_io::Sync>(_sb_idx ? SB_COMPLETE : WRITE_HASH_TO_TA, progress);
|
||||
progress = true;
|
||||
break;
|
||||
}
|
||||
case Channel::TA_REQUEST_CREATE_KEY_IN_PROGRESS:
|
||||
{
|
||||
if (req.dst_module_id() != TRUST_ANCHOR) {
|
||||
class Exception_6 { };
|
||||
throw Exception_6 { };
|
||||
}
|
||||
Trust_anchor_request const *trust_anchor_req = static_cast<Trust_anchor_request const*>(&req);
|
||||
channel._state = Channel::TA_REQUEST_CREATE_KEY_COMPLETE;
|
||||
channel._generated_req_success = trust_anchor_req->success();
|
||||
memcpy(&channel._key_plain.value,
|
||||
const_cast<Trust_anchor_request*>(trust_anchor_req)->key_plaintext_ptr(),
|
||||
sizeof(channel._key_plain.value));
|
||||
|
||||
case WRITE_HASH_TO_TA:
|
||||
|
||||
calc_hash(_blk, _hash);
|
||||
_generate_req<Trust_anchor::Write_hash>(SB_COMPLETE, progress, _hash);
|
||||
break;
|
||||
}
|
||||
case Channel::TA_REQUEST_ENCRYPT_KEY_IN_PROGRESS:
|
||||
{
|
||||
if (req.dst_module_id() != TRUST_ANCHOR) {
|
||||
class Exception_7 { };
|
||||
throw Exception_7 { };
|
||||
}
|
||||
channel._state = Channel::TA_REQUEST_ENCRYPT_KEY_COMPLETE;
|
||||
Trust_anchor_request const *trust_anchor_req =
|
||||
static_cast<Trust_anchor_request const*>(&req);
|
||||
|
||||
/* store and set ID to copy later on */
|
||||
memcpy(&channel._key_cipher.value,
|
||||
const_cast<Trust_anchor_request*>(trust_anchor_req)->key_ciphertext_ptr(),
|
||||
sizeof(channel._key_cipher.value));
|
||||
channel._key_cipher.id = 1;
|
||||
case SB_COMPLETE:
|
||||
|
||||
channel._generated_req_success =
|
||||
trust_anchor_req->success();
|
||||
if (_sb_idx < NR_OF_SUPERBLOCK_SLOTS - 1) {
|
||||
_sb_idx++;
|
||||
_sb = { };
|
||||
_sb.encode_to_blk(_blk);
|
||||
_generate_req<Block_io::Write>(WRITE_BLK_SUCCEEDED, progress, _sb_idx, _blk);
|
||||
} else
|
||||
_mark_req_successful(progress);
|
||||
break;
|
||||
}
|
||||
case Channel::TA_REQUEST_SECURE_SB_IN_PROGRESS:
|
||||
{
|
||||
if (req.dst_module_id() != TRUST_ANCHOR) {
|
||||
class Exception_8 { };
|
||||
throw Exception_8 { };
|
||||
}
|
||||
channel._state = Channel::TA_REQUEST_SECURE_SB_COMPLETE;
|
||||
Trust_anchor_request const *trust_anchor_req =
|
||||
static_cast<Trust_anchor_request const*>(&req);
|
||||
|
||||
channel._generated_req_success =
|
||||
trust_anchor_req->success();
|
||||
break;
|
||||
}
|
||||
case Channel::WRITE_REQUEST_IN_PROGRESS:
|
||||
{
|
||||
if (req.dst_module_id() != BLOCK_IO) {
|
||||
class Exception_9 { };
|
||||
throw Exception_9 { };
|
||||
}
|
||||
channel._state = Channel::WRITE_REQUEST_COMPLETE;
|
||||
Block_io_request const *block_io_req =
|
||||
static_cast<Block_io_request const*>(&req);
|
||||
|
||||
channel._generated_req_success =
|
||||
block_io_req->success();
|
||||
break;
|
||||
}
|
||||
case Channel::SYNC_REQUEST_IN_PROGRESS:
|
||||
{
|
||||
if (req.dst_module_id() != BLOCK_IO) {
|
||||
class Exception_10 { };
|
||||
throw Exception_10 { };
|
||||
}
|
||||
channel._state = Channel::SYNC_REQUEST_COMPLETE;
|
||||
Block_io_request const *block_io_req =
|
||||
static_cast<Block_io_request const*>(&req);
|
||||
|
||||
channel._generated_req_success =
|
||||
block_io_req->success();
|
||||
break;
|
||||
}
|
||||
default:
|
||||
class Exception_2 { };
|
||||
throw Exception_2 { };
|
||||
default: break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Sb_initializer::Sb_initializer()
|
||||
{ }
|
||||
|
||||
|
||||
bool Sb_initializer::ready_to_submit_request()
|
||||
{
|
||||
for (Channel &channel : _channels) {
|
||||
if (channel._state == Channel::INACTIVE)
|
||||
return true;
|
||||
Module_channel_id id { 0 };
|
||||
for (Constructible<Channel> &chan : _channels) {
|
||||
chan.construct(id++);
|
||||
add_channel(*chan);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
void Sb_initializer::submit_request(Module_request &req)
|
||||
{
|
||||
for (Module_request_id id { 0 }; id < NR_OF_CHANNELS; id++) {
|
||||
if (_channels[id]._state == Channel::INACTIVE) {
|
||||
req.dst_request_id(id);
|
||||
_channels[id]._request = *static_cast<Request *>(&req);
|
||||
_channels[id]._state = Channel::SUBMITTED;
|
||||
return;
|
||||
}
|
||||
}
|
||||
class Invalid_call { };
|
||||
throw Invalid_call { };
|
||||
}
|
||||
|
||||
|
||||
void Sb_initializer::execute(bool &progress)
|
||||
{
|
||||
for (Channel &channel : _channels) {
|
||||
|
||||
if (channel._state == Channel::INACTIVE)
|
||||
continue;
|
||||
|
||||
Request &req { channel._request };
|
||||
switch (req._type) {
|
||||
case Request::INIT:
|
||||
|
||||
_execute_init(channel, progress);
|
||||
|
||||
break;
|
||||
default:
|
||||
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
}
|
||||
for_each_channel<Channel>([&] (Channel &chan) {
|
||||
chan.execute(progress); });
|
||||
}
|
||||
|
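In the WRITE_BLK_SUCCEEDED and SB_COMPLETE states above, only slot 0 receives the fully populated superblock while every remaining slot is written as an all-zeroes superblock, after which the hash of slot 0 is handed to the trust anchor. The loop structure can be pictured with the sketch below; write_slot, NUM_SLOTS, and BLOCK_SIZE are invented placeholders and the real encoding of the superblock is elided.

#include <array>
#include <cstdio>

enum { NUM_SLOTS = 8, BLOCK_SIZE = 4096 }; /* placeholder geometry */

using Block = std::array<unsigned char, BLOCK_SIZE>;

/* stand-in for handing an encoded block to the block-I/O back end */
static void write_slot(unsigned idx, Block const &) { printf("write slot %u\n", idx); }

int main()
{
	Block blk { };

	/* slot 0 carries the populated superblock (encoding elided here) */
	blk[0] = 1;
	write_slot(0, blk);

	/* all other slots are written as all-zeroes superblocks */
	blk = Block { };
	for (unsigned idx = 1; idx < NUM_SLOTS; idx++)
		write_slot(idx, blk);

	return 0;
}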
@@ -1,50 +0,0 @@
|
||||
/*
 * \brief  Calculate SHA256 hash over data blocks of a size of 4096 bytes
 * \author Martin Stein
 * \date   2023-02-13
 */

/*
 * Copyright (C) 2023 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU Affero General Public License version 3.
 */

/* tresor includes */
#include <tresor/sha256_4k_hash.h>
#include <tresor/types.h>

/* base includes */
#include <util/string.h>

/* libcrypto */
#include <openssl/sha.h>


bool Tresor::check_sha256_4k_hash(Block const &blk,
                                  Hash  const &expected_hash)
{
	Hash got_hash;
	calc_sha256_4k_hash(blk, got_hash);
	return got_hash == expected_hash;
}


void Tresor::calc_sha256_4k_hash(Block const &blk,
                                 Hash        &hash)
{
	SHA256_CTX context { };
	if (!SHA256_Init(&context)) {
		class Calc_sha256_4k_hash_init_error { };
		throw Calc_sha256_4k_hash_init_error { };
	}
	if (!SHA256_Update(&context, &blk, BLOCK_SIZE)) {
		class Calc_sha256_4k_hash_update_error { };
		throw Calc_sha256_4k_hash_update_error { };
	}
	if (!SHA256_Final((unsigned char *)(&hash), &context)) {
		class Calc_sha256_4k_hash_final_error { };
		throw Calc_sha256_4k_hash_final_error { };
	}
}
@@ -11,546 +11,179 @@
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
/* base includes */
|
||||
#include <base/log.h>
|
||||
|
||||
/* tresor includes */
|
||||
#include <tresor/trust_anchor.h>
|
||||
|
||||
using namespace Tresor;
|
||||
|
||||
|
||||
/**************************
|
||||
** Trust_anchor_request **
|
||||
**************************/
|
||||
|
||||
void Trust_anchor_request::create(void *buf_ptr,
|
||||
size_t buf_size,
|
||||
uint64_t src_module_id,
|
||||
uint64_t src_request_id,
|
||||
size_t req_type,
|
||||
void *key_plaintext_ptr,
|
||||
void *key_ciphertext_ptr,
|
||||
char const *passphrase_ptr,
|
||||
void *hash_ptr)
|
||||
{
|
||||
Trust_anchor_request req { src_module_id, src_request_id };
|
||||
req._type = (Type)req_type;
|
||||
req._passphrase_ptr = (addr_t)passphrase_ptr;
|
||||
if (key_plaintext_ptr != nullptr)
|
||||
memcpy(
|
||||
&req._key_plaintext, key_plaintext_ptr,
|
||||
sizeof(req._key_plaintext));
|
||||
|
||||
if (key_ciphertext_ptr != nullptr)
|
||||
memcpy(
|
||||
&req._key_ciphertext, key_ciphertext_ptr,
|
||||
sizeof(req._key_ciphertext));
|
||||
|
||||
if (hash_ptr != nullptr)
|
||||
memcpy(&req._hash, hash_ptr, sizeof(req._hash));
|
||||
|
||||
if (sizeof(req) > buf_size) {
|
||||
class Exception_2 { };
|
||||
throw Exception_2 { };
|
||||
}
|
||||
memcpy(buf_ptr, &req, sizeof(req));
|
||||
}
|
||||
|
||||
|
||||
Trust_anchor_request::Trust_anchor_request(Module_id src_module_id,
|
||||
Module_request_id src_request_id)
|
||||
Trust_anchor_request::Trust_anchor_request(Module_id src_module_id, Module_channel_id src_chan_id,
|
||||
Type type, Key_value &key_plaintext, Key_value &key_ciphertext,
|
||||
Hash &hash, Passphrase passphrase, bool &success)
|
||||
:
|
||||
Module_request { src_module_id, src_request_id, TRUST_ANCHOR }
|
||||
Module_request { src_module_id, src_chan_id, TRUST_ANCHOR }, _type { type }, _key_plaintext { key_plaintext },
|
||||
_key_ciphertext { key_ciphertext }, _hash { hash }, _pass { passphrase }, _success { success }
|
||||
{ }
|
||||
|
||||
|
||||
char const *Trust_anchor_request::type_to_string(Type type)
|
||||
{
|
||||
switch (type) {
|
||||
case INVALID: return "invalid";
|
||||
case CREATE_KEY: return "create_key";
|
||||
case ENCRYPT_KEY: return "encrypt_key";
|
||||
case DECRYPT_KEY: return "decrypt_key";
|
||||
case SECURE_SUPERBLOCK: return "secure_superblock";
|
||||
case GET_LAST_SB_HASH: return "get_last_sb_hash";
|
||||
case WRITE_HASH: return "write_hash";
|
||||
case READ_HASH: return "read_hash";
|
||||
case INITIALIZE: return "initialize";
|
||||
}
|
||||
return "?";
|
||||
ASSERT_NEVER_REACHED;
|
||||
}
|
||||
|
||||
|
||||
/******************
|
||||
** Trust_anchor **
|
||||
******************/
|
||||
|
||||
void Trust_anchor::_execute_write_read_operation(Vfs::Vfs_handle &file,
|
||||
String<128> const &file_path,
|
||||
Channel &channel,
|
||||
char const *write_buf,
|
||||
char *read_buf,
|
||||
size_t read_size,
|
||||
bool &progress)
|
||||
void Trust_anchor_channel::_mark_req_failed(bool &progress, Error_string str)
|
||||
{
|
||||
Request &req { channel._request };
|
||||
switch (channel._state) {
|
||||
case Channel::WRITE_PENDING:
|
||||
error("trust_anchor: request (", *_req_ptr, ") failed: ", str);
|
||||
_req_ptr->_success = false;
|
||||
_state = REQ_COMPLETE;
|
||||
_req_ptr = nullptr;
|
||||
progress = true;
|
||||
}
|
||||
|
||||
file.seek(channel._file_offset);
|
||||
channel._state = Channel::WRITE_IN_PROGRESS;
|
||||
progress = true;
|
||||
return;
|
||||
|
||||
case Channel::WRITE_IN_PROGRESS:
|
||||
{
|
||||
size_t nr_of_written_bytes { 0 };
|
||||
void Trust_anchor_channel::_mark_req_successful(bool &progress)
|
||||
{
|
||||
Request &req { *_req_ptr };
|
||||
req._success = true;
|
||||
_state = REQ_COMPLETE;
|
||||
_req_ptr = nullptr;
|
||||
progress = true;
|
||||
}
|
||||
|
||||
Const_byte_range_ptr src {
|
||||
write_buf + channel._file_offset, channel._file_size };
|
||||
|
||||
Write_result const result =
|
||||
file.fs().write(&file, src, nr_of_written_bytes);
|
||||
|
||||
switch (result) {
|
||||
|
||||
case Write_result::WRITE_ERR_WOULD_BLOCK:
|
||||
return;
|
||||
|
||||
case Write_result::WRITE_OK:
|
||||
|
||||
channel._file_offset += nr_of_written_bytes;
|
||||
channel._file_size -= nr_of_written_bytes;
|
||||
|
||||
if (channel._file_size > 0) {
|
||||
|
||||
channel._state = Channel::WRITE_PENDING;
|
||||
progress = true;
|
||||
return;
|
||||
}
|
||||
channel._state = Channel::READ_PENDING;
|
||||
channel._file_offset = 0;
|
||||
channel._file_size = read_size;
|
||||
progress = true;
|
||||
return;
|
||||
|
||||
default:
|
||||
|
||||
req._success = false;
|
||||
error("failed to write file ", file_path);
|
||||
channel._state = Channel::COMPLETE;
|
||||
progress = true;
|
||||
return;
|
||||
}
|
||||
}
|
||||
case Channel::READ_PENDING:
|
||||
|
||||
file.seek(channel._file_offset);
|
||||
|
||||
if (!file.fs().queue_read(&file, channel._file_size)) {
|
||||
return;
|
||||
}
|
||||
channel._state = Channel::READ_IN_PROGRESS;
|
||||
progress = true;
|
||||
return;
|
||||
|
||||
case Channel::READ_IN_PROGRESS:
|
||||
{
|
||||
size_t nr_of_read_bytes { 0 };
|
||||
|
||||
Byte_range_ptr dst {
|
||||
read_buf + channel._file_offset, channel._file_size };
|
||||
|
||||
Read_result const result {
|
||||
file.fs().complete_read( &file, dst, nr_of_read_bytes) };
|
||||
|
||||
switch (result) {
|
||||
case Read_result::READ_QUEUED:
|
||||
case Read_result::READ_ERR_WOULD_BLOCK:
|
||||
|
||||
return;
|
||||
|
||||
case Read_result::READ_OK:
|
||||
|
||||
channel._file_offset += nr_of_read_bytes;
|
||||
channel._file_size -= nr_of_read_bytes;
|
||||
req._success = true;
|
||||
|
||||
if (channel._file_size > 0) {
|
||||
|
||||
channel._state = Channel::READ_PENDING;
|
||||
progress = true;
|
||||
return;
|
||||
}
|
||||
channel._state = Channel::COMPLETE;
|
||||
progress = true;
|
||||
return;
|
||||
|
||||
default:
|
||||
|
||||
req._success = false;
|
||||
error("failed to read file ", file_path);
|
||||
channel._state = Channel::COMPLETE;
|
||||
return;
|
||||
}
|
||||
}
|
||||
default:
|
||||
|
||||
return;
|
||||
void Trust_anchor_channel::_read_hash(bool &progress)
|
||||
{
|
||||
Request &req { *_req_ptr };
|
||||
switch (_state) {
|
||||
case REQ_SUBMITTED: _hashsum_file.read(READ_OK, FILE_ERR, 0, { (char *)&req._hash, HASH_SIZE }, progress); break;
|
||||
case READ_OK: _mark_req_successful(progress); break;
|
||||
case FILE_ERR: _mark_req_failed(progress, "file operation failed"); break;
|
||||
default: break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Trust_anchor::_execute_write_operation(Vfs::Vfs_handle &file,
|
||||
String<128> const &file_path,
|
||||
Channel &channel,
|
||||
char const *write_buf,
|
||||
bool &progress,
|
||||
bool result_via_read)
|
||||
|
||||
{
|
||||
Request &req { channel._request };
|
||||
switch (channel._state) {
|
||||
case Channel::WRITE_PENDING:
|
||||
|
||||
file.seek(channel._file_offset);
|
||||
channel._state = Channel::WRITE_IN_PROGRESS;
|
||||
progress = true;
|
||||
return;
|
||||
|
||||
case Channel::WRITE_IN_PROGRESS:
|
||||
{
|
||||
size_t nr_of_written_bytes { 0 };
|
||||
|
||||
Const_byte_range_ptr src {
|
||||
write_buf + channel._file_offset, channel._file_size };
|
||||
|
||||
Write_result const result =
|
||||
file.fs().write(&file, src, nr_of_written_bytes);
|
||||
|
||||
switch (result) {
|
||||
|
||||
case Write_result::WRITE_ERR_WOULD_BLOCK:
|
||||
return;
|
||||
|
||||
case Write_result::WRITE_OK:
|
||||
|
||||
channel._file_offset += nr_of_written_bytes;
|
||||
channel._file_size -= nr_of_written_bytes;
|
||||
|
||||
if (channel._file_size > 0) {
|
||||
|
||||
channel._state = Channel::WRITE_PENDING;
|
||||
progress = true;
|
||||
return;
|
||||
}
|
||||
channel._state = Channel::READ_PENDING;
|
||||
channel._file_offset = 0;
|
||||
|
||||
if (result_via_read)
|
||||
channel._file_size = sizeof(_read_buf);
|
||||
else
|
||||
channel._file_size = 0;
|
||||
|
||||
progress = true;
|
||||
return;
|
||||
|
||||
default:
|
||||
|
||||
req._success = false;
|
||||
error("failed to write file ", file_path);
|
||||
channel._state = Channel::COMPLETE;
|
||||
progress = true;
|
||||
return;
|
||||
}
|
||||
}
|
||||
case Channel::READ_PENDING:
|
||||
|
||||
file.seek(channel._file_offset);
|
||||
|
||||
if (!file.fs().queue_read(&file, channel._file_size)) {
|
||||
return;
|
||||
}
|
||||
channel._state = Channel::READ_IN_PROGRESS;
|
||||
progress = true;
|
||||
return;
|
||||
|
||||
case Channel::READ_IN_PROGRESS:
|
||||
{
|
||||
size_t nr_of_read_bytes { 0 };
|
||||
Byte_range_ptr dst {
|
||||
_read_buf + channel._file_offset, channel._file_size };
|
||||
|
||||
Read_result const result {
|
||||
file.fs().complete_read(&file, dst, nr_of_read_bytes) };
|
||||
|
||||
switch (result) {
|
||||
case Read_result::READ_QUEUED:
|
||||
case Read_result::READ_ERR_WOULD_BLOCK:
|
||||
|
||||
return;
|
||||
|
||||
case Read_result::READ_OK:
|
||||
|
||||
channel._file_offset += nr_of_read_bytes;
|
||||
channel._file_size -= nr_of_read_bytes;
|
||||
|
||||
if (channel._file_size > 0) {
|
||||
|
||||
channel._state = Channel::READ_PENDING;
|
||||
progress = true;
|
||||
return;
|
||||
}
|
||||
if (result_via_read) {
|
||||
req._success = !strcmp(_read_buf, "ok", 3);
|
||||
} else
|
||||
req._success = true;
|
||||
|
||||
channel._state = Channel::COMPLETE;
|
||||
progress = true;
|
||||
return;
|
||||
|
||||
default:
|
||||
|
||||
req._success = false;
|
||||
error("failed to read file ", file_path);
|
||||
channel._state = Channel::COMPLETE;
|
||||
return;
|
||||
}
|
||||
}
|
||||
default:
|
||||
|
||||
return;
|
||||
void Trust_anchor_channel::_create_key(bool &progress)
{
Request &req { *_req_ptr };
|
||||
switch (_state) {
|
||||
case REQ_SUBMITTED: _generate_key_file.read(READ_OK, FILE_ERR, 0, { (char *)&req._key_plaintext, KEY_SIZE }, progress); break;
|
||||
case READ_OK: _mark_req_successful(progress); break;
|
||||
case FILE_ERR: _mark_req_failed(progress, "file operation failed"); break;
|
||||
default: break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Trust_anchor::_execute_read_operation(Vfs::Vfs_handle &file,
|
||||
String<128> const &file_path,
|
||||
Channel &channel,
|
||||
char *read_buf,
|
||||
bool &progress)
|
||||
{
Request &req { channel._request };
switch (channel._state) {
case Channel::READ_PENDING:

file.seek(channel._file_offset);

if (!file.fs().queue_read(&file, channel._file_size)) {
return;
}
channel._state = Channel::READ_IN_PROGRESS;
progress = true;


void Trust_anchor_channel::_initialize(bool &progress)
{
Request &req { *_req_ptr };
switch (_state) {
case REQ_SUBMITTED: _initialize_file.write(WRITE_OK, FILE_ERR, 0, { req._pass.string(), req._pass.length() - 1 }, progress); break;
case WRITE_OK: _initialize_file.read(READ_OK, FILE_ERR, 0, { _result_buf, sizeof(_result_buf) }, progress); break;
case READ_OK:

if (strcmp(_result_buf, "ok", sizeof(_result_buf)))
_mark_req_failed(progress, { "trust anchor did not return \"ok\""});
else
_mark_req_successful(progress);
break;

case FILE_ERR: _mark_req_failed(progress, "file operation failed"); break;
default: break;
}
}
|
||||
|
||||
|
||||
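/*
 * Write the request's hash to the hashsum file and follow up with a
 * zero-length read on the same file before marking the request complete.
 */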
void Trust_anchor_channel::_write_hash(bool &progress)
|
||||
{
|
||||
Request &req { *_req_ptr };
|
||||
switch (_state) {
|
||||
case REQ_SUBMITTED: _hashsum_file.write(WRITE_OK, FILE_ERR, 0, { (char *)&req._hash, HASH_SIZE }, progress); break;
|
||||
case WRITE_OK: _hashsum_file.read(READ_OK, FILE_ERR, 0, { _result_buf, 0 }, progress); break;
|
||||
case READ_OK: _mark_req_successful(progress); break;
|
||||
case FILE_ERR: _mark_req_failed(progress, "file operation failed"); break;
|
||||
default: break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
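/*
 * Key encryption is a two-step file protocol: write the plaintext key to
 * the encrypt file, then read back the ciphertext key of the same size.
 */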
void Trust_anchor_channel::_encrypt_key(bool &progress)
|
||||
{
|
||||
Request &req { *_req_ptr };
|
||||
switch (_state) {
|
||||
case REQ_SUBMITTED: _encrypt_file.write(WRITE_OK, FILE_ERR, 0, { (char *)&req._key_plaintext, KEY_SIZE }, progress); break;
|
||||
case WRITE_OK: _encrypt_file.read(READ_OK, FILE_ERR, 0, { (char *)&req._key_ciphertext, KEY_SIZE }, progress); break;
|
||||
case READ_OK: _mark_req_successful(progress); break;
|
||||
case FILE_ERR: _mark_req_failed(progress, "file operation failed"); break;
|
||||
default: break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
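/*
 * Key decryption mirrors encryption: write the ciphertext key to the
 * decrypt file, then read back the plaintext key of the same size.
 */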
void Trust_anchor_channel::_decrypt_key(bool &progress)
|
||||
{
|
||||
Request &req { *_req_ptr };
|
||||
switch (_state) {
|
||||
case REQ_SUBMITTED: _decrypt_file.write(WRITE_OK, FILE_ERR, 0, { (char *)&req._key_ciphertext, KEY_SIZE }, progress); break;
|
||||
case WRITE_OK: _decrypt_file.read(READ_OK, FILE_ERR, 0, { (char *)&req._key_plaintext, KEY_SIZE }, progress); break;
|
||||
case READ_OK: _mark_req_successful(progress); break;
|
||||
case FILE_ERR: _mark_req_failed(progress, "file operation failed"); break;
|
||||
default: break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
case Channel::READ_IN_PROGRESS:
|
||||
{
|
||||
size_t nr_of_read_bytes { 0 };
|
||||
Byte_range_ptr dst {
|
||||
read_buf + channel._file_offset, channel._file_size };
|
||||
|
||||
Read_result const result {
|
||||
file.fs().complete_read(&file, dst, nr_of_read_bytes) };
|
||||
|
||||
switch (result) {
|
||||
case Read_result::READ_QUEUED:
|
||||
case Read_result::READ_ERR_WOULD_BLOCK:
|
||||
|
||||
return;
|
||||
|
||||
case Read_result::READ_OK:
|
||||
|
||||
channel._file_offset += nr_of_read_bytes;
|
||||
channel._file_size -= nr_of_read_bytes;
|
||||
req._success = true;
|
||||
|
||||
if (channel._file_size > 0) {
|
||||
|
||||
channel._state = Channel::READ_PENDING;
|
||||
progress = true;
|
||||
return;
|
||||
}
|
||||
channel._state = Channel::COMPLETE;
|
||||
progress = true;
|
||||
return;
|
||||
|
||||
default:
|
||||
|
||||
req._success = false;
|
||||
error("failed to read file ", file_path);
|
||||
channel._state = Channel::COMPLETE;
|
||||
return;
|
||||
}
|
||||
void Trust_anchor_channel::execute(bool &progress)
{
if (!_req_ptr)
return;

Request &req { *_req_ptr };
|
||||
switch (req._type) {
|
||||
case Request::INITIALIZE: _initialize(progress); break;
|
||||
case Request::WRITE_HASH: _write_hash(progress); break;
|
||||
case Request::READ_HASH: _read_hash(progress); break;
|
||||
case Request::CREATE_KEY: _create_key(progress); break;
|
||||
case Request::ENCRYPT_KEY: _encrypt_key(progress); break;
|
||||
case Request::DECRYPT_KEY: _decrypt_key(progress); break;
|
||||
}
|
||||
default:
|
||||
}
|
||||
|
||||
return;
|
||||
|
||||
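/*
 * Each channel stores the VFS environment and the trust-anchor directory
 * path taken from the "path" attribute of the module's XML config node.
 */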
Trust_anchor_channel::Trust_anchor_channel(Module_channel_id id, Vfs::Env &vfs_env, Xml_node const &xml_node)
|
||||
:
|
||||
Module_channel { TRUST_ANCHOR, id }, _vfs_env { vfs_env }, _path { xml_node.attribute_value("path", Tresor::Path()) }
|
||||
{ }
|
||||
|
||||
|
||||
Trust_anchor::Trust_anchor(Vfs::Env &vfs_env, Xml_node const &xml_node)
|
||||
{
|
||||
Module_channel_id id { 0 };
|
||||
for (Constructible<Channel> &chan : _channels) {
|
||||
chan.construct(id++, vfs_env, xml_node);
|
||||
add_channel(*chan);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
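/*
 * Remember the freshly submitted request at this channel and kick off its
 * state machine with REQ_SUBMITTED.
 */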
void Trust_anchor_channel::_request_submitted(Module_request &mod_req)
|
||||
{
|
||||
_req_ptr = static_cast<Request *>(&mod_req);
|
||||
_state = REQ_SUBMITTED;
|
||||
}
|
||||
|
||||
|
||||
void Trust_anchor::execute(bool &progress)
|
||||
{
|
||||
for (Channel &channel : _channels) {
|
||||
|
||||
if (channel._state == Channel::INACTIVE)
|
||||
continue;
|
||||
|
||||
Request &req { channel._request };
|
||||
switch (req._type) {
|
||||
case Request::INITIALIZE:
|
||||
|
||||
if (channel._state == Channel::SUBMITTED) {
|
||||
channel._state = Channel::WRITE_PENDING;
|
||||
channel._file_offset = 0;
|
||||
channel._file_size =
|
||||
strlen((char const *)req._passphrase_ptr);
|
||||
}
|
||||
_execute_write_operation(
|
||||
_initialize_file, _initialize_path, channel,
|
||||
(char const *)req._passphrase_ptr, progress, true);
|
||||
|
||||
break;
|
||||
|
||||
case Request::SECURE_SUPERBLOCK:
|
||||
|
||||
if (channel._state == Channel::SUBMITTED) {
|
||||
channel._state = Channel::WRITE_PENDING;
|
||||
channel._file_offset = 0;
|
||||
channel._file_size = sizeof(req._hash);
|
||||
}
|
||||
_execute_write_operation(
|
||||
_hashsum_file, _hashsum_path, channel,
|
||||
(char const *)&req._hash, progress, false);
|
||||
|
||||
break;
|
||||
|
||||
case Request::GET_LAST_SB_HASH:
|
||||
|
||||
if (channel._state == Channel::SUBMITTED) {
|
||||
channel._state = Channel::READ_PENDING;
|
||||
channel._file_offset = 0;
|
||||
channel._file_size = sizeof(req._hash);
|
||||
}
|
||||
_execute_read_operation(
|
||||
_hashsum_file, _hashsum_path, channel,
|
||||
(char *)&req._hash, progress);
|
||||
|
||||
break;
|
||||
|
||||
case Request::CREATE_KEY:
|
||||
|
||||
if (channel._state == Channel::SUBMITTED) {
|
||||
channel._state = Channel::READ_PENDING;
|
||||
channel._file_offset = 0;
|
||||
channel._file_size = sizeof(req._key_plaintext);
|
||||
}
|
||||
_execute_read_operation(
|
||||
_generate_key_file, _generate_key_path, channel,
|
||||
(char *)req._key_plaintext, progress);
|
||||
|
||||
break;
|
||||
|
||||
case Request::ENCRYPT_KEY:
|
||||
|
||||
if (channel._state == Channel::SUBMITTED) {
|
||||
channel._state = Channel::WRITE_PENDING;
|
||||
channel._file_offset = 0;
|
||||
channel._file_size = sizeof(req._key_plaintext);
|
||||
}
|
||||
_execute_write_read_operation(
|
||||
_encrypt_file, _encrypt_path, channel,
|
||||
(char const *)req._key_plaintext,
|
||||
(char *)req._key_ciphertext,
|
||||
sizeof(req._key_ciphertext),
|
||||
progress);
|
||||
|
||||
break;
|
||||
|
||||
case Request::DECRYPT_KEY:
|
||||
|
||||
if (channel._state == Channel::SUBMITTED) {
|
||||
channel._state = Channel::WRITE_PENDING;
|
||||
channel._file_offset = 0;
|
||||
channel._file_size = sizeof(req._key_ciphertext);
|
||||
}
|
||||
_execute_write_read_operation(
|
||||
_decrypt_file, _decrypt_path, channel,
|
||||
(char const *)req._key_ciphertext,
|
||||
(char *)req._key_plaintext,
|
||||
sizeof(req._key_plaintext),
|
||||
progress);
|
||||
|
||||
break;
|
||||
|
||||
default:
|
||||
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Trust_anchor::Trust_anchor(Vfs::Env &vfs_env,
|
||||
Xml_node const &xml_node)
|
||||
:
|
||||
_vfs_env { vfs_env },
|
||||
_path { xml_node.attribute_value("path", String<128>()) }
|
||||
{ }
|
||||
|
||||
|
||||
bool Trust_anchor::_peek_completed_request(uint8_t *buf_ptr,
|
||||
size_t buf_size)
|
||||
{
|
||||
for (Channel &channel : _channels) {
|
||||
if (channel._state == Channel::COMPLETE) {
|
||||
if (sizeof(channel._request) > buf_size) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
memcpy(buf_ptr, &channel._request, sizeof(channel._request));
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
void Trust_anchor::_drop_completed_request(Module_request &req)
|
||||
{
|
||||
Module_request_id id { 0 };
|
||||
id = req.dst_request_id();
|
||||
if (id >= NR_OF_CHANNELS) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
if (_channels[id]._state != Channel::COMPLETE) {
|
||||
class Exception_2 { };
|
||||
throw Exception_2 { };
|
||||
}
|
||||
_channels[id]._state = Channel::INACTIVE;
|
||||
}
|
||||
|
||||
|
||||
bool Trust_anchor::ready_to_submit_request()
|
||||
{
|
||||
for (Channel &channel : _channels) {
|
||||
if (channel._state == Channel::INACTIVE)
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
void Trust_anchor::submit_request(Module_request &req)
|
||||
{
|
||||
for (Module_request_id id { 0 }; id < NR_OF_CHANNELS; id++) {
|
||||
if (_channels[id]._state == Channel::INACTIVE) {
|
||||
req.dst_request_id(id);
|
||||
_channels[id]._request = *static_cast<Request *>(&req);
|
||||
_channels[id]._state = Channel::SUBMITTED;
|
||||
return;
|
||||
}
|
||||
}
|
||||
class Invalid_call { };
|
||||
throw Invalid_call { };
|
||||
for_each_channel<Channel>([&] (Channel &chan) {
|
||||
chan.execute(progress); });
|
||||
}
|
||||
|
@@ -11,453 +11,173 @@
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
/* base includes */
|
||||
#include <base/log.h>
|
||||
|
||||
/* tresor includes */
|
||||
#include <tresor/vbd_check.h>
|
||||
#include <tresor/block_io.h>
|
||||
#include <tresor/sha256_4k_hash.h>
|
||||
#include <tresor/hash.h>
|
||||
|
||||
using namespace Tresor;
|
||||
|
||||
|
||||
/***********************
|
||||
** Vbd_check_request **
|
||||
***********************/
|
||||
|
||||
Vbd_check_request::Vbd_check_request(uint64_t src_module_id,
                                     uint64_t src_request_id,
                                     Type type,
                                     Tree_level_index max_lvl,
                                     Tree_node_index max_child_idx,
                                     Number_of_leaves nr_of_leaves,
                                     Type_1_node root)
:
Module_request { src_module_id, src_request_id, VBD_CHECK },
_type { type },
_max_lvl { max_lvl },
_max_child_idx { max_child_idx },
_nr_of_leaves { nr_of_leaves },
_root { root }
{ }


Vbd_check_request::Vbd_check_request(Module_id src_mod, Module_channel_id src_chan, Tree_root const &vbd, bool &success)
:
Module_request { src_mod, src_chan, VBD_CHECK }, _vbd { vbd }, _success { success }
{ }
|
||||
|
||||
|
||||
char const *Vbd_check_request::type_to_string(Type type)
|
||||
|
||||
{
|
||||
switch (type) {
|
||||
case INVALID: return "invalid";
|
||||
case CHECK: return "check";
|
||||
}
|
||||
return "?";
|
||||
}
|
||||
|
||||
|
||||
/***************
|
||||
** Vbd_check **
|
||||
***************/
|
||||
|
||||
void Vbd_check::_execute_inner_t1_child(Channel &chan,
|
||||
Type_1_node const &child,
|
||||
Type_1_level &child_lvl,
|
||||
Child_state &child_state,
|
||||
Tree_level_index lvl,
|
||||
Tree_node_index child_idx,
|
||||
bool &progress)
|
||||
{
|
||||
Request &req { chan._request };
|
||||
if (child_state == Channel::READ_BLOCK) {
|
||||
|
||||
if (!child.valid()) {
|
||||
|
||||
if (req._nr_of_leaves == 0) {
|
||||
|
||||
child_state = Channel::DONE;
|
||||
progress = true;
|
||||
|
||||
if (VERBOSE_CHECK)
|
||||
log(Level_indent { lvl, req._max_lvl },
|
||||
" lvl ", lvl, " child ", child_idx,
|
||||
": expectedly invalid");
|
||||
|
||||
} else {
|
||||
|
||||
if (VERBOSE_CHECK)
|
||||
log(Level_indent { lvl, req._max_lvl },
|
||||
" lvl ", lvl, " child ", child_idx, " (", child,"): unexpectedly invalid");
|
||||
|
||||
_mark_req_failed(chan, progress, "check for valid child");
|
||||
}
|
||||
|
||||
} else if (!chan._gen_prim.valid()) {
|
||||
|
||||
chan._gen_prim = {
|
||||
.success = false,
|
||||
.tag = Channel::BLOCK_IO,
|
||||
.blk_nr = child.pba,
|
||||
.dropped = false };
|
||||
|
||||
chan._lvl_to_read = lvl - 1;
|
||||
progress = true;
|
||||
|
||||
if (VERBOSE_CHECK)
|
||||
log(Level_indent { lvl, req._max_lvl },
|
||||
" lvl ", lvl, " child ", child_idx, " (", child, "): load to lvl ", lvl - 1);
|
||||
|
||||
} else if (chan._gen_prim.tag != Channel::BLOCK_IO ||
|
||||
chan._gen_prim.blk_nr != child.pba) {
|
||||
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
|
||||
} else if (!chan._gen_prim.success) {
|
||||
|
||||
} else {
|
||||
|
||||
for (Child_state &state : child_lvl.children_state) {
|
||||
state = Channel::READ_BLOCK;
|
||||
}
|
||||
chan._gen_prim = { };
|
||||
child_state = Channel::CHECK_HASH;
|
||||
progress = true;
|
||||
}
|
||||
|
||||
} else if (child_state == Channel::CHECK_HASH) {
|
||||
|
||||
Block blk { };
|
||||
child_lvl.children.encode_to_blk(blk);
|
||||
|
||||
if (child.gen == INITIAL_GENERATION ||
|
||||
check_sha256_4k_hash(blk, child.hash)) {
|
||||
|
||||
child_state = Channel::DONE;
|
||||
if (&child_state == &chan._root_state) {
|
||||
chan._request._success = true;
|
||||
}
|
||||
progress = true;
|
||||
|
||||
if (VERBOSE_CHECK)
|
||||
log(Level_indent { lvl, req._max_lvl },
|
||||
" lvl ", lvl, " child ", child_idx, ": good hash");
|
||||
|
||||
} else {
|
||||
|
||||
if (VERBOSE_CHECK) {
|
||||
|
||||
Hash hash;
|
||||
calc_sha256_4k_hash(blk, hash);
|
||||
log(Level_indent { lvl, req._max_lvl },
|
||||
" lvl ", lvl, " child ", child_idx, " (", child, "): bad hash ", hash);
|
||||
}
|
||||
|
||||
_mark_req_failed(chan, progress, "check inner hash");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Vbd_check::_execute_leaf_child(Channel &chan,
|
||||
Type_1_node const &child,
|
||||
Block const &child_lvl,
|
||||
Child_state &child_state,
|
||||
Tree_level_index lvl,
|
||||
Tree_node_index child_idx,
|
||||
bool &progress)
|
||||
{
|
||||
Request &req { chan._request };
|
||||
if (child_state == Channel::READ_BLOCK) {
|
||||
|
||||
if (req._nr_of_leaves == 0) {
|
||||
|
||||
if (child.valid()) {
|
||||
|
||||
if (VERBOSE_CHECK)
|
||||
log(Level_indent { lvl, req._max_lvl },
|
||||
" lvl ", lvl, " child ", child_idx, " (", child, "): unexpectedly valid");
|
||||
|
||||
_mark_req_failed(chan, progress, "check for unused child");
|
||||
|
||||
} else {
|
||||
|
||||
child_state = Channel::DONE;
|
||||
progress = true;
|
||||
|
||||
if (VERBOSE_CHECK)
|
||||
log(Level_indent { lvl, req._max_lvl },
|
||||
" lvl ", lvl, " child ", child_idx, ": expectedly invalid");
|
||||
}
|
||||
|
||||
} else if (child.gen == INITIAL_GENERATION) {
|
||||
|
||||
req._nr_of_leaves--;
|
||||
child_state = Channel::DONE;
|
||||
progress = true;
|
||||
|
||||
if (VERBOSE_CHECK)
|
||||
log(Level_indent { lvl, req._max_lvl },
|
||||
" lvl ", lvl, " child ", child_idx, ": uninitialized");
|
||||
|
||||
} else if (!chan._gen_prim.valid()) {
|
||||
|
||||
chan._gen_prim = {
|
||||
.success = false,
|
||||
.tag = Channel::BLOCK_IO,
|
||||
.blk_nr = child.pba,
|
||||
.dropped = false };
|
||||
|
||||
chan._lvl_to_read = lvl - 1;
|
||||
progress = true;
|
||||
|
||||
if (VERBOSE_CHECK)
|
||||
log(Level_indent { lvl, req._max_lvl },
|
||||
" lvl ", lvl, " child ", child_idx, " (", child, "): load to lvl ", lvl - 1);
|
||||
|
||||
} else if (chan._gen_prim.tag != Channel::BLOCK_IO ||
|
||||
chan._gen_prim.blk_nr != child.pba) {
|
||||
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
|
||||
} else if (!chan._gen_prim.success) {
|
||||
|
||||
} else {
|
||||
|
||||
chan._gen_prim = { };
|
||||
child_state = Channel::CHECK_HASH;
|
||||
progress = true;
|
||||
}
|
||||
|
||||
} else if (child_state == Channel::CHECK_HASH) {
|
||||
|
||||
if (check_sha256_4k_hash(child_lvl, child.hash)) {
|
||||
|
||||
req._nr_of_leaves--;
|
||||
child_state = Channel::DONE;
|
||||
progress = true;
|
||||
|
||||
if (VERBOSE_CHECK)
|
||||
log(Level_indent { lvl, req._max_lvl },
|
||||
" lvl ", lvl, " child ", child_idx, ": good hash");
|
||||
|
||||
} else {
|
||||
|
||||
if (VERBOSE_CHECK) {
|
||||
|
||||
Hash hash;
|
||||
calc_sha256_4k_hash(child_lvl, hash);
|
||||
log(Level_indent { lvl, req._max_lvl },
|
||||
" lvl ", lvl, " child ", child_idx, " (", child, "): bad hash ", hash);
|
||||
}
|
||||
|
||||
_mark_req_failed(chan, progress, "check leaf hash");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Vbd_check::_execute_check(Channel &chan,
|
||||
bool &progress)
|
||||
{
|
||||
Request &req { chan._request };
|
||||
for (Tree_level_index lvl { VBD_LOWEST_T1_LVL }; lvl <= req._max_lvl; lvl++) {
|
||||
for (Tree_node_index child_idx { 0 };
|
||||
child_idx <= req._max_child_idx;
|
||||
child_idx++) {
|
||||
|
||||
Type_1_level &t1_lvl { chan._t1_lvls[lvl] };
|
||||
if (t1_lvl.children_state[child_idx] != Channel::DONE) {
|
||||
|
||||
if (lvl == VBD_LOWEST_T1_LVL)
|
||||
_execute_leaf_child(
|
||||
chan,
|
||||
chan._t1_lvls[lvl].children.nodes[child_idx],
|
||||
chan._leaf_lvl,
|
||||
chan._t1_lvls[lvl].children_state[child_idx],
|
||||
lvl, child_idx, progress);
|
||||
else
|
||||
_execute_inner_t1_child(
|
||||
chan,
|
||||
chan._t1_lvls[lvl].children.nodes[child_idx],
|
||||
chan._t1_lvls[lvl - 1],
|
||||
chan._t1_lvls[lvl].children_state[child_idx],
|
||||
lvl, child_idx, progress);
|
||||
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (chan._root_state != Channel::DONE) {
|
||||
|
||||
_execute_inner_t1_child(
|
||||
chan, req._root, chan._t1_lvls[req._max_lvl], chan._root_state,
|
||||
req._max_lvl + 1, 0, progress);
|
||||
|
||||
void Vbd_check_channel::_generated_req_completed(State_uint state_uint)
{
if (!_generated_req_success) {
|
||||
error("vbd check: request (", *_req_ptr, ") failed because generated request failed)");
|
||||
_req_ptr->_success = false;
|
||||
_state = REQ_COMPLETE;
|
||||
_req_ptr = nullptr;
|
||||
return;
|
||||
}
|
||||
_state = (State)state_uint;
|
||||
}
|
||||
|
||||
|
||||
void Vbd_check::_mark_req_failed(Channel &chan,
                                 bool &progress,
                                 char const *str)
{
error("vbd check: request (", chan._request, ") failed at step \"", str, "\"");
chan._request._success = false;
chan._root_state = Channel::DONE;
}


bool Vbd_check_channel::_execute_node(Tree_level_index lvl, Tree_node_index node_idx, bool &progress)
{
bool &check_node = _check_node[lvl][node_idx];
if (!check_node)
return false;
|
||||
|
||||
Request &req { *_req_ptr };
|
||||
Type_1_node const &node = _t1_blks.items[lvl].nodes[node_idx];
|
||||
switch (_state) {
|
||||
case REQ_IN_PROGRESS:
|
||||
|
||||
if (lvl == 1) {
|
||||
if (!_num_remaining_leaves) {
|
||||
if (node.valid()) {
|
||||
_mark_req_failed(progress, { "lvl ", lvl, " node ", node_idx, " (", node,
|
||||
") valid but no leaves remaining" });
|
||||
break;
|
||||
}
|
||||
check_node = false;
|
||||
progress = true;
|
||||
if (VERBOSE_CHECK)
|
||||
log(Level_indent { lvl, req._vbd.max_lvl }, " lvl ", lvl, " node ", node_idx, ": expectedly invalid");
|
||||
break;
|
||||
}
|
||||
if (node.gen == INITIAL_GENERATION) {
|
||||
_num_remaining_leaves--;
|
||||
check_node = false;
|
||||
progress = true;
|
||||
if (VERBOSE_CHECK)
|
||||
log(Level_indent { lvl, req._vbd.max_lvl }, " lvl ", lvl, " node ", node_idx, ": uninitialized");
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
if (!node.valid()) {
|
||||
if (_num_remaining_leaves) {
|
||||
_mark_req_failed(progress, { "lvl ", lvl, " node ", node_idx, " invalid but ",
|
||||
_num_remaining_leaves, " leaves remaining" });
|
||||
break;
|
||||
}
|
||||
check_node = false;
|
||||
progress = true;
|
||||
if (VERBOSE_CHECK)
|
||||
log(Level_indent { lvl, req._vbd.max_lvl }, " lvl ", lvl, " node ", node_idx, ": expectedly invalid");
|
||||
break;
|
||||
}
|
||||
}
|
||||
_generate_req<Block_io::Read>(READ_BLK_SUCCEEDED, progress, node.pba, _blk);
|
||||
if (VERBOSE_CHECK)
|
||||
log(Level_indent { lvl, req._vbd.max_lvl }, " lvl ", lvl, " node ", node_idx, " (", node,
|
||||
"): load to lvl ", lvl - 1);
|
||||
break;
|
||||
|
||||
case READ_BLK_SUCCEEDED:
|
||||
|
||||
if (!(lvl > 1 && node.gen == INITIAL_GENERATION) && !check_hash(_blk, node.hash)) {
|
||||
_mark_req_failed(progress, { "lvl ", lvl, " node ", node_idx, " (", node, ") has bad hash" });
|
||||
break;
|
||||
}
|
||||
if (lvl == 1)
|
||||
_num_remaining_leaves--;
|
||||
else {
|
||||
_t1_blks.items[lvl - 1].decode_from_blk(_blk);
|
||||
for (bool &cn : _check_node[lvl - 1])
|
||||
cn = true;
|
||||
}
|
||||
check_node = false;
|
||||
_state = REQ_IN_PROGRESS;
|
||||
progress = true;
|
||||
if (VERBOSE_CHECK)
|
||||
log(Level_indent { lvl, req._vbd.max_lvl }, " lvl ", lvl, " node ", node_idx, ": good hash");
|
||||
break;
|
||||
|
||||
default: break;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
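/*
 * On submission, only the root node of the VBD is flagged for checking.
 * The walk below repeatedly executes the first still flagged node;
 * reading an inner node flags all nodes of the level below. Once no node
 * is flagged anymore, the request is completed.
 */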
void Vbd_check_channel::execute(bool &progress)
|
||||
{
|
||||
if (!_req_ptr)
|
||||
return;
|
||||
|
||||
Request &req { *_req_ptr };
|
||||
if (_state == REQ_SUBMITTED) {
|
||||
for (Tree_level_index lvl { 1 }; lvl <= req._vbd.max_lvl + 1; lvl++)
|
||||
for (Tree_node_index node_idx { 0 }; node_idx < req._vbd.degree; node_idx++)
|
||||
_check_node[lvl][node_idx] = false;
|
||||
|
||||
_num_remaining_leaves = req._vbd.num_leaves;
|
||||
_t1_blks.items[req._vbd.max_lvl + 1].nodes[0] = req._vbd.t1_node();
|
||||
_check_node[req._vbd.max_lvl + 1][0] = true;
|
||||
_state = REQ_IN_PROGRESS;
|
||||
}
|
||||
for (Tree_level_index lvl { 1 }; lvl <= req._vbd.max_lvl + 1; lvl++)
|
||||
for (Tree_node_index node_idx { 0 }; node_idx < req._vbd.degree; node_idx++)
|
||||
if (_execute_node(lvl, node_idx, progress))
|
||||
return;
|
||||
|
||||
_mark_req_successful(progress);
|
||||
}
|
||||
|
||||
|
||||
void Vbd_check_channel::_mark_req_failed(bool &progress, Error_string str)
|
||||
{
|
||||
error("vbd check request (", *_req_ptr, ") failed: ", str);
|
||||
_req_ptr->_success = false;
|
||||
_state = REQ_COMPLETE;
|
||||
_req_ptr = nullptr;
|
||||
progress = true;
|
||||
}
|
||||
|
||||
|
||||
bool Vbd_check::_peek_completed_request(uint8_t *buf_ptr,
|
||||
size_t buf_size)
|
||||
|
||||
{
|
||||
for (Channel &chan : _channels) {
|
||||
|
||||
if (chan._request._type != Request::INVALID &&
|
||||
chan._root_state == Channel::DONE) {
|
||||
|
||||
if (sizeof(chan._request) > buf_size) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
memcpy(buf_ptr, &chan._request, sizeof(chan._request));
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
void Vbd_check_channel::_mark_req_successful(bool &progress)
{
_req_ptr->_success = true;
|
||||
_state = REQ_COMPLETE;
|
||||
_req_ptr = nullptr;
|
||||
progress = true;
|
||||
}
|
||||
|
||||
|
||||
void Vbd_check::_drop_completed_request(Module_request &req)
|
||||
|
||||
{
|
||||
Module_request_id id { 0 };
|
||||
id = req.dst_request_id();
|
||||
if (id >= NR_OF_CHANNELS) {
|
||||
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
Channel &chan { _channels[id] };
|
||||
if (chan._request._type == Request::INVALID &&
|
||||
chan._root_state != Channel::DONE) {
|
||||
|
||||
class Exception_2 { };
|
||||
throw Exception_2 { };
|
||||
}
|
||||
chan = Channel { };
|
||||
void Vbd_check_channel::_request_submitted(Module_request &mod_req)
{
_req_ptr = static_cast<Request *>(&mod_req);
|
||||
_state = REQ_SUBMITTED;
|
||||
}
|
||||
|
||||
|
||||
bool Vbd_check::_peek_generated_request(uint8_t *buf_ptr,
|
||||
size_t buf_size)
|
||||
|
||||
{
|
||||
for (Module_request_id id { 0 }; id < NR_OF_CHANNELS; id++) {
|
||||
|
||||
Channel &chan { _channels[id] };
|
||||
|
||||
if (!chan._gen_prim.valid() || chan._gen_prim.dropped)
|
||||
continue;
|
||||
|
||||
switch (chan._gen_prim.tag) {
|
||||
case Channel::BLOCK_IO:
|
||||
|
||||
construct_in_buf<Block_io_request>(
|
||||
buf_ptr, buf_size, VBD_CHECK, id,
|
||||
Block_io_request::READ, 0, 0, 0,
|
||||
chan._gen_prim.blk_nr, 0, 1,
|
||||
chan._lvl_to_read == 0 ?
|
||||
(void *)&chan._leaf_lvl :
|
||||
(void *)&chan._encoded_blk,
|
||||
nullptr);
|
||||
|
||||
return true;
|
||||
|
||||
default:
|
||||
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
Vbd_check::Vbd_check()
{
Module_channel_id id { 0 };
|
||||
for (Constructible<Channel> &chan : _channels) {
|
||||
chan.construct(id++);
|
||||
add_channel(*chan);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
void Vbd_check::_drop_generated_request(Module_request &req)
|
||||
{
|
||||
Module_request_id const id { req.src_request_id() };
|
||||
if (id >= NR_OF_CHANNELS) {
|
||||
class Exception_0 { };
|
||||
throw Exception_0 { };
|
||||
}
|
||||
Channel &chan { _channels[id] };
|
||||
chan._gen_prim.dropped = true;
|
||||
}
|
||||
|
||||
|
||||
void Vbd_check::generated_request_complete(Module_request &mod_req)
|
||||
{
|
||||
Module_request_id const id { mod_req.src_request_id() };
|
||||
if (id >= NR_OF_CHANNELS) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
Channel &chan { _channels[id] };
|
||||
switch (mod_req.dst_module_id()) {
|
||||
case BLOCK_IO:
|
||||
{
|
||||
Block_io_request &gen_req { *static_cast<Block_io_request*>(&mod_req) };
|
||||
chan._gen_prim.success = gen_req.success();
|
||||
if (chan._lvl_to_read > 0)
|
||||
chan._t1_lvls[chan._lvl_to_read].children.decode_from_blk(chan._encoded_blk);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
class Exception_8 { };
|
||||
throw Exception_8 { };
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
bool Vbd_check::ready_to_submit_request()
|
||||
{
|
||||
for (Channel &chan : _channels) {
|
||||
if (chan._request._type == Request::INVALID)
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
void Vbd_check::submit_request(Module_request &req)
|
||||
{
|
||||
for (Module_request_id id { 0 }; id < NR_OF_CHANNELS; id++) {
|
||||
Channel &chan { _channels[id] };
|
||||
if (chan._request._type == Request::INVALID) {
|
||||
req.dst_request_id(id);
|
||||
chan._request = *static_cast<Request *>(&req);
|
||||
chan._root_state = Channel::READ_BLOCK;
|
||||
return;
|
||||
}
|
||||
}
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
|
||||
|
||||
void Vbd_check::execute(bool &progress)
|
||||
{
|
||||
for (Channel &chan : _channels) {
|
||||
|
||||
Request &req { chan._request };
|
||||
switch (req._type) {
|
||||
case Request::CHECK:
|
||||
|
||||
_execute_check(chan, progress);
|
||||
break;
|
||||
|
||||
default:
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
for_each_channel<Channel>([&] (Channel &chan) {
|
||||
chan.execute(progress); });
|
||||
}
|
||||
|
@@ -1,5 +1,6 @@
|
||||
/*
|
||||
* \brief Module for initializing the VBD
|
||||
* \author Martin Stein
|
||||
* \author Josef Soentgen
|
||||
* \date 2023-03-03
|
||||
*/
|
||||
@@ -11,596 +12,194 @@
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
/* base includes */
|
||||
#include <base/log.h>
|
||||
|
||||
/* tresor includes */
|
||||
#include <tresor/block_allocator.h>
|
||||
#include <tresor/block_io.h>
|
||||
#include <tresor/sha256_4k_hash.h>
|
||||
#include <tresor/hash.h>
|
||||
#include <tresor/vbd_initializer.h>
|
||||
|
||||
using namespace Tresor;
|
||||
|
||||
static constexpr bool DEBUG = false;
|
||||
|
||||
|
||||
Vbd_initializer_request::Vbd_initializer_request(Module_id src_module_id,
                                                 Module_request_id src_request_id)
:
Module_request { src_module_id, src_request_id, VBD_INITIALIZER }
{ }


Vbd_initializer_request::Vbd_initializer_request(Module_id src_mod, Module_channel_id src_chan, Tree_root &vbd,
                                                 Pba_allocator &pba_alloc, bool &success)
:
Module_request { src_mod, src_chan, VBD_INITIALIZER }, _vbd { vbd }, _pba_alloc { pba_alloc }, _success { success }
{ }
|
||||
|
||||
|
||||
void Vbd_initializer_request::create(void *buf_ptr,
|
||||
size_t buf_size,
|
||||
uint64_t src_module_id,
|
||||
uint64_t src_request_id,
|
||||
size_t req_type,
|
||||
uint64_t max_level_idx,
|
||||
uint64_t max_child_idx,
|
||||
uint64_t nr_of_leaves)
|
||||
bool Vbd_initializer_channel::_execute_node(Tree_level_index lvl, Tree_node_index node_idx, bool &progress)
|
||||
{
|
||||
Vbd_initializer_request req { src_module_id, src_request_id };
|
||||
req._type = (Type)req_type;
|
||||
Type_1_node &node = _t1_blks.items[lvl].nodes[node_idx];
|
||||
Node_state &node_state = _node_states[lvl][node_idx];
|
||||
switch (node_state) {
|
||||
case DONE: return false;
|
||||
case INIT_BLOCK:
|
||||
|
||||
req._max_level_idx = max_level_idx;
|
||||
req._max_child_idx = max_child_idx;
|
||||
req._nr_of_leaves = nr_of_leaves;
|
||||
|
||||
if (sizeof(req) > buf_size) {
|
||||
class Bad_size_0 { };
|
||||
throw Bad_size_0 { };
|
||||
}
|
||||
memcpy(buf_ptr, &req, sizeof(req));
|
||||
}
|
||||
|
||||
|
||||
char const *Vbd_initializer_request::type_to_string(Type type)
|
||||
{
|
||||
switch (type) {
|
||||
case INVALID: return "invalid";
|
||||
case INIT: return "init";
|
||||
}
|
||||
return "?";
|
||||
}
|
||||
|
||||
|
||||
void Vbd_initializer::_execute_leaf_child(Channel &channel,
|
||||
bool &progress,
|
||||
uint64_t &nr_of_leaves,
|
||||
Type_1_node &child,
|
||||
Vbd_initializer_channel::Child_state &child_state,
|
||||
uint64_t level_index,
|
||||
uint64_t child_index)
|
||||
{
|
||||
using CS = Vbd_initializer_channel::Child_state;
|
||||
|
||||
switch (child_state) {
|
||||
case CS::INIT_BLOCK:
|
||||
child_state = CS::INIT_NODE;
|
||||
progress = true;
|
||||
return;
|
||||
|
||||
case CS::INIT_NODE:
|
||||
if (nr_of_leaves == 0) {
|
||||
|
||||
if (DEBUG)
|
||||
log("[vbd_init] node: ", level_index, " ", child_index,
|
||||
" assign pba 0, leaf unused");
|
||||
|
||||
Vbd_initializer_channel::reset_node(child);
|
||||
child_state = CS::DONE;
|
||||
if (lvl == 1) {
|
||||
node_state = INIT_NODE;
|
||||
progress = true;
|
||||
} else {
|
||||
|
||||
switch (channel._state) {
|
||||
case Channel::IN_PROGRESS:
|
||||
channel._state = Channel::BLOCK_ALLOC_PENDING;
|
||||
} else
|
||||
if (_num_remaining_leaves) {
|
||||
_reset_level(lvl - 1, INIT_BLOCK);
|
||||
node_state = INIT_NODE;
|
||||
progress = true;
|
||||
break;
|
||||
if (VERBOSE_VBD_INIT)
|
||||
log("[vbd_init] node: ", lvl, " ", node_idx, " reset level: ", lvl - 1);
|
||||
} else {
|
||||
node = { };
|
||||
node_state = DONE;
|
||||
progress = true;
|
||||
if (VERBOSE_VBD_INIT)
|
||||
log("[vbd_init] node: ", lvl, " ", node_idx, " assign pba 0, inner node unused");
|
||||
}
|
||||
break;
|
||||
|
||||
case Channel::BLOCK_ALLOC_PENDING:
|
||||
break;
|
||||
case INIT_NODE:
|
||||
|
||||
case Channel::BLOCK_ALLOC_IN_PROGRESS:
|
||||
break;
|
||||
|
||||
case Channel::BLOCK_ALLOC_COMPLETE:
|
||||
/* bail early in case the allocator failed */
|
||||
if (!channel._generated_req_success) {
|
||||
_mark_req_failed(channel, progress,
|
||||
"allocate block for VBD initialization");
|
||||
if (lvl == 1)
|
||||
if (_num_remaining_leaves) {
|
||||
node = { };
|
||||
if (!_req_ptr->_pba_alloc.alloc(node.pba)) {
|
||||
_mark_req_failed(progress, "allocate pba");
|
||||
break;
|
||||
}
|
||||
channel._state = Channel::IN_PROGRESS;
|
||||
|
||||
Vbd_initializer_channel::reset_node(child);
|
||||
|
||||
child.pba = channel._blk_nr;
|
||||
child_state = CS::DONE;
|
||||
--nr_of_leaves;
|
||||
node_state = DONE;
|
||||
_num_remaining_leaves--;
|
||||
progress = true;
|
||||
|
||||
if (DEBUG)
|
||||
log("[vbd_init] node: ", level_index, " ", child_index,
|
||||
" assign pba: ", channel._blk_nr, " leaves left: ",
|
||||
nr_of_leaves);
|
||||
break;
|
||||
|
||||
default:
|
||||
if (VERBOSE_VBD_INIT)
|
||||
log("[vbd_init] node: ", lvl, " ", node_idx, " assign pba: ", node.pba, " leaves left: ", _num_remaining_leaves);
|
||||
} else {
|
||||
node = { };
|
||||
node_state = DONE;
|
||||
progress = true;
|
||||
if (VERBOSE_VBD_INIT)
|
||||
log("[vbd_init] node: ", lvl, " ", node_idx, " assign pba 0, leaf unused");
|
||||
}
|
||||
else {
|
||||
node = { };
|
||||
if (!_req_ptr->_pba_alloc.alloc(node.pba)) {
|
||||
_mark_req_failed(progress, "allocate pba");
|
||||
break;
|
||||
}
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Vbd_initializer::_execute_inner_t1_child(Channel &channel,
|
||||
bool &progress,
|
||||
uint64_t nr_of_leaves,
|
||||
uint64_t &level_to_write,
|
||||
Type_1_node &child,
|
||||
Vbd_initializer_channel::Type_1_level &child_level,
|
||||
Vbd_initializer_channel::Child_state &child_state,
|
||||
uint64_t level_index,
|
||||
uint64_t child_index)
|
||||
|
||||
{
|
||||
using CS = Vbd_initializer_channel::Child_state;
|
||||
|
||||
switch (child_state) {
|
||||
case CS::INIT_BLOCK:
|
||||
|
||||
if (nr_of_leaves == 0) {
|
||||
|
||||
if (DEBUG)
|
||||
log("[vbd_init] node: ", level_index, " ", child_index,
|
||||
" assign pba 0, inner node unused");
|
||||
|
||||
Vbd_initializer_channel::reset_node(child);
|
||||
child_state = CS::DONE;
|
||||
progress = true;
|
||||
return;
|
||||
} else {
|
||||
|
||||
if (DEBUG)
|
||||
log("[vbd_init] node: ", level_index, " ", child_index,
|
||||
" reset level: ", level_index - 1);
|
||||
|
||||
Vbd_initializer_channel::reset_level(child_level, CS::INIT_BLOCK);
|
||||
child_state = CS::INIT_NODE;
|
||||
progress = true;
|
||||
return;
|
||||
_t1_blks.items[lvl - 1].encode_to_blk(_blk);
|
||||
calc_hash(_blk, node.hash);
|
||||
node_state = WRITE_BLOCK;
|
||||
generate_req<Block_io::Write>(EXECUTE_NODES, progress, node.pba, _blk, _generated_req_success);
|
||||
_state = REQ_GENERATED;
|
||||
if (VERBOSE_VBD_INIT)
|
||||
log("[vbd_init] node: ", lvl, " ", node_idx, " assign pba: ", node.pba);
|
||||
}
|
||||
break;
|
||||
|
||||
case CS::INIT_NODE:
|
||||
case WRITE_BLOCK:
|
||||
|
||||
switch (channel._state) {
|
||||
case Channel::IN_PROGRESS:
|
||||
channel._state = Channel::BLOCK_ALLOC_PENDING;
|
||||
progress = true;
|
||||
break;
|
||||
|
||||
case Channel::BLOCK_ALLOC_PENDING:
|
||||
break;
|
||||
|
||||
case Channel::BLOCK_ALLOC_IN_PROGRESS:
|
||||
break;
|
||||
|
||||
case Channel::BLOCK_ALLOC_COMPLETE:
|
||||
{
|
||||
/* bail early in case the allocator failed */
|
||||
if (!channel._generated_req_success) {
|
||||
_mark_req_failed(channel, progress,
|
||||
"allocate block for VBD initialization");
|
||||
break;
|
||||
}
|
||||
channel._state = Channel::IN_PROGRESS;
|
||||
|
||||
Vbd_initializer_channel::reset_node(child);
|
||||
child.pba = channel._blk_nr;
|
||||
|
||||
Block blk { };
|
||||
child_level.children.encode_to_blk(blk);
|
||||
calc_sha256_4k_hash(blk, child.hash);
|
||||
|
||||
child_state = CS::WRITE_BLOCK;
|
||||
progress = true;
|
||||
|
||||
if (DEBUG)
|
||||
log("[vbd_init] node: ", level_index, " ", child_index,
|
||||
" assign pba: ", channel._blk_nr);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
|
||||
case CS::WRITE_BLOCK:
|
||||
|
||||
switch (channel._state) {
|
||||
case Channel::IN_PROGRESS:
|
||||
{
|
||||
channel._state = Channel::BLOCK_IO_PENDING;
|
||||
channel._child_pba = child.pba;
|
||||
level_to_write = level_index - 1;
|
||||
progress = true;
|
||||
break;
|
||||
}
|
||||
|
||||
case Channel::BLOCK_IO_PENDING:
|
||||
break;
|
||||
|
||||
case Channel::BLOCK_IO_IN_PROGRESS:
|
||||
break;
|
||||
|
||||
case Channel::BLOCK_IO_COMPLETE:
|
||||
/* bail early in case the allocator failed */
|
||||
if (!channel._generated_req_success) {
|
||||
_mark_req_failed(channel, progress,
|
||||
"write block for VBD initialization");
|
||||
break;
|
||||
}
|
||||
channel._state = Channel::IN_PROGRESS;
|
||||
|
||||
child_state = CS::DONE;
|
||||
progress = true;
|
||||
|
||||
if (DEBUG)
|
||||
log("[vbd_init] node: ", level_index, " ", child_index,
|
||||
" write pba: ", channel._child_pba, " level: ",
|
||||
level_index -1, " (child: ", child, ")");
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Vbd_initializer::_execute(Channel &channel,
|
||||
bool &progress)
|
||||
{
|
||||
Request &req { channel._request };
|
||||
|
||||
/*
|
||||
* First handle all child nodes (leaves and inner nodes) that starts after
|
||||
* triggering the root node below.
|
||||
*/
|
||||
for (uint64_t level_idx = 0; level_idx <= req._max_level_idx; level_idx++) {
|
||||
|
||||
for (uint64_t child_idx = 0; child_idx <= req._max_child_idx; child_idx++) {
|
||||
|
||||
Vbd_initializer_channel::Child_state &state =
|
||||
channel._t1_levels[level_idx].children_state[child_idx];
|
||||
|
||||
if (state != Vbd_initializer_channel::Child_state::DONE) {
|
||||
|
||||
Type_1_node &child =
|
||||
channel._t1_levels[level_idx].children.nodes[child_idx];
|
||||
|
||||
if (level_idx == 1) {
|
||||
_execute_leaf_child(channel, progress, req._nr_of_leaves,
|
||||
child, state, level_idx, child_idx);
|
||||
} else {
|
||||
|
||||
Vbd_initializer_channel::Type_1_level &t1_level =
|
||||
channel._t1_levels[level_idx - 1];
|
||||
|
||||
_execute_inner_t1_child(channel, progress,
|
||||
req._nr_of_leaves,
|
||||
channel._level_to_write,
|
||||
child, t1_level, state,
|
||||
level_idx, child_idx);
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Checking the root node will trigger the initialization process as
|
||||
* well as will finish it.
|
||||
*/
|
||||
if (channel._root_node.state != Vbd_initializer_channel::Child_state::DONE) {
|
||||
|
||||
Vbd_initializer_channel::Type_1_level &t1_level =
|
||||
channel._t1_levels[req._max_level_idx];
|
||||
|
||||
_execute_inner_t1_child(channel, progress,
|
||||
req._nr_of_leaves,
|
||||
channel._level_to_write,
|
||||
channel._root_node.node, t1_level, channel._root_node.state,
|
||||
req._max_level_idx + 1, 0);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* We will end up here when the root state is 'DONE'.
|
||||
*/
|
||||
if (req._nr_of_leaves == 0)
|
||||
_mark_req_successful(channel, progress);
|
||||
else
|
||||
_mark_req_failed(channel, progress, "initialize VBD");
|
||||
}
|
||||
|
||||
|
||||
void Vbd_initializer::_execute_init(Channel &channel,
|
||||
bool &progress)
|
||||
{
|
||||
switch (channel._state) {
|
||||
case Channel::SUBMITTED:
|
||||
|
||||
/* clean residual state */
|
||||
for (unsigned int i = 0; i < TREE_MAX_LEVEL; i++) {
|
||||
Vbd_initializer_channel::reset_level(channel._t1_levels[i],
|
||||
Vbd_initializer_channel::Child_state::DONE);
|
||||
}
|
||||
channel._level_to_write = 0;
|
||||
|
||||
channel._state = Channel::PENDING;
|
||||
channel._root_node.state = Vbd_initializer_channel::Child_state::INIT_BLOCK;
|
||||
ASSERT(lvl > 1);
|
||||
node_state = DONE;
|
||||
progress = true;
|
||||
|
||||
return;
|
||||
|
||||
case Channel::PENDING:
|
||||
|
||||
channel._state = Channel::IN_PROGRESS;
|
||||
progress = true;
|
||||
return;
|
||||
|
||||
case Channel::IN_PROGRESS:
|
||||
|
||||
_execute(channel, progress);
|
||||
return;
|
||||
|
||||
case Channel::BLOCK_ALLOC_COMPLETE:
|
||||
|
||||
_execute(channel, progress);
|
||||
return;
|
||||
|
||||
case Channel::BLOCK_IO_COMPLETE:
|
||||
|
||||
_execute(channel, progress);
|
||||
return;
|
||||
|
||||
default:
|
||||
/*
|
||||
* Omit other states related to ALLOC and IO as those
|
||||
* are handled via Module API.
|
||||
*/
|
||||
return;
|
||||
if (VERBOSE_VBD_INIT)
|
||||
log("[vbd_init] node: ", lvl, " ", node_idx, " write pba: ", node.pba, " level: ", lvl - 1, " (node: ", node, ")");
|
||||
break;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
void Vbd_initializer::_mark_req_failed(Channel &channel,
|
||||
bool &progress,
|
||||
char const *str)
|
||||
|
||||
{
|
||||
error("request failed: failed to ", str);
|
||||
channel._request._success = false;
|
||||
channel._state = Channel::COMPLETE;
|
||||
void Vbd_initializer_channel::_generated_req_completed(State_uint state_uint)
{
if (!_generated_req_success) {
|
||||
error("vbd initializer: request (", *_req_ptr, ") failed because generated request failed)");
|
||||
_req_ptr->_success = false;
|
||||
_state = COMPLETE;
|
||||
_req_ptr = nullptr;
|
||||
return;
|
||||
}
|
||||
_state = (State)state_uint;
|
||||
}
|
||||
|
||||
|
||||
void Vbd_initializer_channel::_mark_req_failed(bool &progress, char const *str)
|
||||
{
|
||||
error("vbd_initializer request (", *_req_ptr, ") failed because: ", str);
|
||||
_req_ptr->_success = false;
|
||||
_state = COMPLETE;
|
||||
_req_ptr = nullptr;
|
||||
progress = true;
|
||||
}
|
||||
|
||||
|
||||
void Vbd_initializer::_mark_req_successful(Channel &channel,
|
||||
bool &progress)
|
||||
|
||||
{
|
||||
Request &req { channel._request };
|
||||
|
||||
memcpy(req._root_node, &channel._root_node.node, sizeof (req._root_node));
|
||||
req._success = true;
|
||||
|
||||
channel._state = Channel::COMPLETE;
|
||||
void Vbd_initializer_channel::_mark_req_successful(bool &progress)
{
_req_ptr->_vbd.t1_node(_t1_blks.items[_req_ptr->_vbd.max_lvl + 1].nodes[0]);
|
||||
_req_ptr->_success = true;
|
||||
_state = COMPLETE;
|
||||
_req_ptr = nullptr;
|
||||
progress = true;
|
||||
}
|
||||
|
||||
|
||||
bool Vbd_initializer::_peek_completed_request(uint8_t *buf_ptr,
|
||||
size_t buf_size)
|
||||
|
||||
{
|
||||
for (Channel &channel : _channels) {
|
||||
if (channel._state == Channel::COMPLETE) {
|
||||
if (sizeof(channel._request) > buf_size) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
memcpy(buf_ptr, &channel._request, sizeof(channel._request));
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
void Vbd_initializer_channel::_request_submitted(Module_request &mod_req)
{
_req_ptr = static_cast<Request *>(&mod_req);
|
||||
_state = SUBMITTED;
|
||||
}
|
||||
|
||||
|
||||
void Vbd_initializer::_drop_completed_request(Module_request &req)
|
||||
|
||||
{
|
||||
Module_request_id id { 0 };
|
||||
id = req.dst_request_id();
|
||||
if (id >= NR_OF_CHANNELS) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
if (_channels[id]._state != Channel::COMPLETE) {
|
||||
class Exception_2 { };
|
||||
throw Exception_2 { };
|
||||
}
|
||||
_channels[id]._state = Channel::INACTIVE;
|
||||
}
|
||||
void Vbd_initializer_channel::execute(bool &progress)
{
if (!_req_ptr)
|
||||
return;
|
||||
|
||||
Request &req { *_req_ptr };
|
||||
switch (_state) {
|
||||
case SUBMITTED:
|
||||
|
||||
bool Vbd_initializer::_peek_generated_request(uint8_t *buf_ptr,
|
||||
size_t buf_size)
|
||||
{
|
||||
for (Module_request_id id { 0 }; id < NR_OF_CHANNELS; id++) {
|
||||
_num_remaining_leaves = req._vbd.num_leaves;
|
||||
for (Tree_level_index lvl = 0; lvl < TREE_MAX_LEVEL; lvl++)
|
||||
_reset_level(lvl, Vbd_initializer_channel::DONE);
|
||||
|
||||
Channel &channel { _channels[id] };
|
||||
_node_states[req._vbd.max_lvl + 1][0] = Vbd_initializer_channel::INIT_BLOCK;
|
||||
_state = EXECUTE_NODES;
|
||||
progress = true;
|
||||
return;
|
||||
|
||||
if (channel._state != Vbd_initializer_channel::State::INACTIVE)
|
||||
case EXECUTE_NODES:
|
||||
|
||||
switch (channel._state) {
|
||||
case Vbd_initializer_channel::State::BLOCK_ALLOC_PENDING:
|
||||
{
|
||||
Block_allocator_request::Type const block_allocator_req_type {
|
||||
Block_allocator_request::GET };
|
||||
for (Tree_level_index lvl = 0; lvl <= req._vbd.max_lvl + 1; lvl++)
|
||||
for (Tree_node_index node_idx = 0; node_idx < req._vbd.degree; node_idx++)
|
||||
if (_execute_node(lvl, node_idx, progress))
|
||||
return;
|
||||
|
||||
Block_allocator_request::create(
|
||||
buf_ptr, buf_size, VBD_INITIALIZER, id,
|
||||
block_allocator_req_type);
|
||||
if (_num_remaining_leaves)
|
||||
_mark_req_failed(progress, "leaves remaining");
|
||||
else
|
||||
_mark_req_successful(progress);
|
||||
return;
|
||||
|
||||
return true;
|
||||
}
|
||||
case Vbd_initializer_channel::State::BLOCK_IO_PENDING:
|
||||
{
|
||||
Block_io_request::Type const block_io_req_type {
|
||||
Block_io_request::WRITE };
|
||||
|
||||
channel._t1_levels[channel._level_to_write].children.encode_to_blk(channel._encoded_blk);
|
||||
construct_in_buf<Block_io_request>(
|
||||
buf_ptr, buf_size, VBD_INITIALIZER, id,
|
||||
block_io_req_type, 0, 0, 0,
|
||||
channel._child_pba, 0, 1, &channel._encoded_blk, nullptr);
|
||||
|
||||
if (DEBUG) {
|
||||
log("BLOCK_IO_PENDING write ", channel._child_pba);
|
||||
Vbd_initializer_channel::dump(channel._t1_levels[channel._level_to_write].children);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
void Vbd_initializer::_drop_generated_request(Module_request &req)
|
||||
{
|
||||
Module_request_id const id { req.src_request_id() };
|
||||
if (id >= NR_OF_CHANNELS) {
|
||||
class Bad_id { };
|
||||
throw Bad_id { };
|
||||
}
|
||||
switch (_channels[id]._state) {
|
||||
case Vbd_initializer_channel::State::BLOCK_ALLOC_PENDING:
|
||||
_channels[id]._state = Vbd_initializer_channel::State::BLOCK_ALLOC_IN_PROGRESS;
|
||||
break;
|
||||
case Vbd_initializer_channel::State::BLOCK_IO_PENDING:
|
||||
_channels[id]._state = Vbd_initializer_channel::State::BLOCK_IO_IN_PROGRESS;
|
||||
break;
|
||||
default:
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
default: return;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Vbd_initializer::generated_request_complete(Module_request &mod_req)
|
||||
|
||||
{
|
||||
Module_request_id const id { mod_req.src_request_id() };
|
||||
if (id >= NR_OF_CHANNELS) {
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
switch (mod_req.dst_module_id()) {
|
||||
case BLOCK_ALLOCATOR:
|
||||
{
|
||||
Block_allocator_request const &gen_req { *static_cast<Block_allocator_request *>(&mod_req) };
|
||||
switch (_channels[id]._state) {
|
||||
case Channel::BLOCK_ALLOC_IN_PROGRESS:
|
||||
_channels[id]._state = Channel::BLOCK_ALLOC_COMPLETE;
|
||||
_channels[id]._blk_nr = gen_req.blk_nr();
|
||||
_channels[id]._generated_req_success = gen_req.success();
|
||||
break;
|
||||
default:
|
||||
class Exception_2 { };
|
||||
throw Exception_2 { };
|
||||
}
|
||||
break;
|
||||
}
|
||||
case BLOCK_IO:
|
||||
{
|
||||
Block_io_request const &gen_req { *static_cast<Block_io_request *>(&mod_req) };
|
||||
switch (_channels[id]._state) {
|
||||
case Channel::BLOCK_IO_IN_PROGRESS:
|
||||
_channels[id]._state = Channel::BLOCK_IO_COMPLETE;
|
||||
_channels[id]._generated_req_success = gen_req.success();
|
||||
break;
|
||||
default:
|
||||
class Exception_2 { };
|
||||
throw Exception_2 { };
|
||||
}
|
||||
break;
|
||||
}
|
||||
default:
|
||||
class Exception_2 { };
|
||||
throw Exception_2 { };
|
||||
void Vbd_initializer_channel::_reset_level(Tree_level_index lvl, Node_state state)
{
for (unsigned int idx = 0; idx < NUM_NODES_PER_BLK; idx++) {
|
||||
_t1_blks.items[lvl].nodes[idx] = { };
|
||||
_node_states[lvl][idx] = state;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Vbd_initializer::Vbd_initializer()
|
||||
{ }
|
||||
|
||||
|
||||
bool Vbd_initializer::ready_to_submit_request()
|
||||
{
|
||||
for (Channel &channel : _channels) {
|
||||
if (channel._state == Channel::INACTIVE)
|
||||
return true;
|
||||
Vbd_initializer::Vbd_initializer()
{
Module_channel_id id { 0 };
|
||||
for (Constructible<Channel> &chan : _channels) {
|
||||
chan.construct(id++);
|
||||
add_channel(*chan);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
void Vbd_initializer::submit_request(Module_request &req)
|
||||
{
|
||||
for (Module_request_id id { 0 }; id < NR_OF_CHANNELS; id++) {
|
||||
if (_channels[id]._state == Channel::INACTIVE) {
|
||||
req.dst_request_id(id);
|
||||
_channels[id]._request = *static_cast<Request *>(&req);
|
||||
_channels[id]._state = Channel::SUBMITTED;
|
||||
return;
|
||||
}
|
||||
}
|
||||
class Invalid_call { };
|
||||
throw Invalid_call { };
|
||||
}
|
||||
|
||||
|
||||
void Vbd_initializer::execute(bool &progress)
|
||||
{
|
||||
for (Channel &channel : _channels) {
|
||||
|
||||
if (channel._state == Channel::INACTIVE)
|
||||
continue;
|
||||
|
||||
Request &req { channel._request };
|
||||
switch (req._type) {
|
||||
case Request::INIT:
|
||||
|
||||
_execute_init(channel, progress);
|
||||
|
||||
break;
|
||||
default:
|
||||
|
||||
class Exception_1 { };
|
||||
throw Exception_1 { };
|
||||
}
|
||||
}
|
||||
for_each_channel<Channel>([&] (Channel &chan) {
|
||||
chan.execute(progress); });
|
||||
}
|
||||
|
@@ -1,55 +0,0 @@
|
||||
/*
|
||||
* \brief Utilities for a more convenient use of the VFS
|
||||
* \author Martin Stein
|
||||
* \date 2020-10-29
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2020 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
/* tresor includes */
|
||||
#include <tresor/vfs_utilities.h>
|
||||
|
||||
using namespace Genode;
|
||||
using namespace Vfs;
|
||||
|
||||
|
||||
/**********************
|
||||
** Global functions **
|
||||
**********************/
|
||||
|
||||
Vfs::Vfs_handle &vfs_open(Vfs::Env &vfs_env,
|
||||
String<128> path,
|
||||
Vfs::Directory_service::Open_mode mode)
|
||||
{
|
||||
Vfs_handle *handle { nullptr };
|
||||
Directory_service::Open_result const result {
|
||||
vfs_env.root_dir().open(
|
||||
path.string(), mode, &handle, vfs_env.alloc()) };
|
||||
|
||||
if (result != Directory_service::Open_result::OPEN_OK) {
|
||||
|
||||
error("failed to open file ", path.string());
|
||||
class Failed { };
|
||||
throw Failed { };
|
||||
}
|
||||
return *handle;
|
||||
}
|
||||
|
||||
|
||||
Vfs_handle &vfs_open_wo(Vfs::Env &vfs_env,
|
||||
String<128> path)
|
||||
{
|
||||
return vfs_open(vfs_env, path, Directory_service::OPEN_MODE_WRONLY);
|
||||
}
|
||||
|
||||
|
||||
Vfs::Vfs_handle &vfs_open_rw(Vfs::Env &vfs_env,
|
||||
String<128> path)
|
||||
{
|
||||
return vfs_open(vfs_env, path, Directory_service::OPEN_MODE_RDWR);
|
||||
}
|
File diff suppressed because it is too large

225	repos/gems/src/lib/vfs/tresor/splitter.cc (new file)
@@ -0,0 +1,225 @@
|
||||
/*
|
||||
* \brief Module for splitting unaligned/uneven I/O requests
|
||||
* \author Martin Stein
|
||||
* \author Josef Soentgen
|
||||
* \date 2023-09-11
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2023 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
/* vfs tresor includes */
|
||||
#include <splitter.h>
|
||||
|
||||
using namespace Tresor;
|
||||
|
||||
Splitter_request::Splitter_request(Module_id src_mod, Module_channel_id src_chan, Operation op, bool &success,
|
||||
Request_offset off, Byte_range_ptr const &buf, Key_id key_id, Generation gen)
|
||||
:
|
||||
Module_request { src_mod, src_chan, SPLITTER }, _op { op }, _off { off }, _key_id { key_id }, _gen { gen },
|
||||
_buf { buf.start, buf.num_bytes }, _success { success }
|
||||
{ }
|
||||
|
||||
|
||||
char const *Splitter_request::op_to_string(Operation op)
|
||||
{
|
||||
switch (op) {
|
||||
case Operation::READ: return "read";
|
||||
case Operation::WRITE: return "write";
|
||||
}
|
||||
ASSERT_NEVER_REACHED;
|
||||
}
|
||||
|
||||
|
||||
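/*
 * Completion hook for Tresor requests generated by this channel: a failed
 * back-end request fails the splitter request immediately, otherwise the
 * channel continues in the state that was handed to _generate_req.
 */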
void Splitter_channel::_generated_req_completed(State_uint state_uint)
|
||||
{
|
||||
if (!_generated_req_success) {
|
||||
error("splitter: request (", *_req_ptr, ") failed because generated request failed)");
|
||||
_req_ptr->_success = false;
|
||||
_state = REQ_COMPLETE;
|
||||
_req_ptr = nullptr;
|
||||
return;
|
||||
}
|
||||
_state = (State)state_uint;
|
||||
}
|
||||
|
||||
|
||||
void Splitter_channel::_mark_req_successful(bool &progress)
|
||||
{
|
||||
Request &req { *_req_ptr };
|
||||
req._success = true;
|
||||
_state = REQ_COMPLETE;
|
||||
_req_ptr = nullptr;
|
||||
progress = true;
|
||||
}
|
||||
|
||||
|
||||
void Splitter_channel::_request_submitted(Module_request &req)
|
||||
{
|
||||
_req_ptr = static_cast<Request*>(&req);
|
||||
_state = REQ_SUBMITTED;
|
||||
}
|
||||
|
||||
|
||||
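/*
 * Advance the byte offset within the splitter request and generate the
 * next Tresor request: block-aligned spans are accessed directly in the
 * caller's buffer, whereas a protruding first or last block is staged in
 * the local block buffer. When no bytes remain, the request is marked
 * successful.
 */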
void Splitter_channel::_advance_curr_off(addr_t advance, Tresor::Request::Operation op, bool &progress)
|
||||
{
|
||||
Splitter_request &req { *_req_ptr };
|
||||
_curr_off += advance;
|
||||
if (!_num_remaining_bytes()) {
|
||||
_mark_req_successful(progress);
|
||||
} else if (_curr_off % BLOCK_SIZE) {
|
||||
_curr_buf_addr = (addr_t)&_blk;
|
||||
_generate_req<Tresor::Request>(
|
||||
PROTRUDING_FIRST_BLK_READ, progress, Tresor::Request::READ, _curr_vba(), 0, 1, req._key_id, id(), _gen);
|
||||
} else if (_num_remaining_bytes() < BLOCK_SIZE) {
|
||||
_curr_buf_addr = (addr_t)&_blk;
|
||||
_generate_req<Tresor::Request>(
|
||||
PROTRUDING_LAST_BLK_READ, progress, Tresor::Request::READ, _curr_vba(), 0, 1, req._key_id, id(), _gen);
|
||||
} else {
|
||||
_curr_buf_addr = (addr_t)req._buf.start + _curr_buf_off();
|
||||
_generate_req<Tresor::Request>(
|
||||
INSIDE_BLKS_ACCESSED, progress, op, _curr_vba(), 0, _num_remaining_bytes() / BLOCK_SIZE, req._key_id, id(), _gen);
|
||||
}
|
||||
}
|
||||
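
/*
 * Worked example (illustrative numbers, assuming a BLOCK_SIZE of 4096 bytes):
 * a WRITE request with _off = 4608 and _buf.num_bytes = 10000 ends at byte
 * offset 14608 and is decomposed as follows.
 *
 *  1) _curr_off = 4608:  4608 % 4096 = 512, so VBA 1 protrudes at the front;
 *     block 1 is read, its last 3584 bytes are overwritten from the user
 *     buffer, and the block is written back (advance to 8192)
 *  2) _curr_off = 8192:  6416 bytes remain, 6416 / 4096 = 1 fully covered
 *     block; VBA 2 is written directly from the user buffer (advance to 12288)
 *  3) _curr_off = 12288: 2320 bytes remain (< BLOCK_SIZE), so VBA 3 protrudes
 *     at the end; block 3 is read, its first 2320 bytes are overwritten, and
 *     the block is written back (advance to 14608)
 *  4) _curr_off = 14608: no bytes remain, _mark_req_successful completes the
 *     splitter request
 */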


void Splitter_channel::_write(bool &progress)
{
    Splitter_request &req { *_req_ptr };
    switch (_state) {
    case REQ_SUBMITTED:

        _curr_off = 0;
        _gen = req._gen;
        _advance_curr_off(req._off, Tresor::Request::WRITE, progress);
        break;

    case PROTRUDING_FIRST_BLK_READ:
    {
        size_t num_outside_bytes { _curr_off % BLOCK_SIZE };
        size_t num_inside_bytes { min(_num_remaining_bytes(), BLOCK_SIZE - num_outside_bytes) };
        memcpy((void *)((addr_t)&_blk + num_outside_bytes), req._buf.start, num_inside_bytes);
        _curr_buf_addr = (addr_t)&_blk;
        _generate_req<Tresor::Request>(
            PROTRUDING_FIRST_BLK_WRITTEN, progress, Tresor::Request::WRITE, _curr_vba(), 0, 1, req._key_id, id(), _gen);
        break;
    }
    case PROTRUDING_FIRST_BLK_WRITTEN:
    {
        size_t num_outside_bytes { _curr_off % BLOCK_SIZE };
        size_t num_inside_bytes { min(_num_remaining_bytes(), BLOCK_SIZE - num_outside_bytes) };
        _advance_curr_off(num_inside_bytes, Tresor::Request::WRITE, progress);
        break;
    }
    case INSIDE_BLKS_ACCESSED:

        _advance_curr_off((_num_remaining_bytes() / BLOCK_SIZE) * BLOCK_SIZE, Tresor::Request::WRITE, progress);
        break;

    case PROTRUDING_LAST_BLK_READ:

        memcpy(&_blk, (void *)((addr_t)req._buf.start + _curr_buf_off()), _num_remaining_bytes());
        _curr_buf_addr = (addr_t)&_blk;
        _generate_req<Tresor::Request>(
            PROTRUDING_LAST_BLK_WRITTEN, progress, Tresor::Request::WRITE, _curr_vba(), 0, 1, req._key_id, id(), _gen);
        break;

    case PROTRUDING_LAST_BLK_WRITTEN: _advance_curr_off(_num_remaining_bytes(), Tresor::Request::WRITE, progress); break;
    default: break;
    }
}


void Splitter_channel::_read(bool &progress)
{
    Splitter_request &req { *_req_ptr };
    switch (_state) {
    case REQ_SUBMITTED:

        _curr_off = 0;
        _gen = req._gen;
        _advance_curr_off(req._off, Tresor::Request::READ, progress);
        break;

    case PROTRUDING_FIRST_BLK_READ:
    {
        size_t num_outside_bytes { _curr_off % BLOCK_SIZE };
        size_t num_inside_bytes { min(_num_remaining_bytes(), BLOCK_SIZE - num_outside_bytes) };
        memcpy(req._buf.start, (void *)((addr_t)&_blk + num_outside_bytes), num_inside_bytes);
        _advance_curr_off(num_inside_bytes, Tresor::Request::READ, progress);
        break;
    }
    case INSIDE_BLKS_ACCESSED:

        _advance_curr_off((_num_remaining_bytes() / BLOCK_SIZE) * BLOCK_SIZE, Tresor::Request::READ, progress);
        break;

    case PROTRUDING_LAST_BLK_READ:

        memcpy((void *)((addr_t)req._buf.start + _curr_buf_off()), &_blk, _num_remaining_bytes());
        _advance_curr_off(_num_remaining_bytes(), Tresor::Request::READ, progress);
        break;

    default: break;
    }
}


void Splitter_channel::execute(bool &progress)
{
    if (!_req_ptr)
        return;

    switch (_req_ptr->_op) {
    case Request::READ: _read(progress); break;
    case Request::WRITE: _write(progress); break;
    }
}


Block &Splitter_channel::_blk_buf_for_vba(Virtual_block_address vba)
{
    ASSERT(_state == REQ_GENERATED);
    return *(Block *)(_curr_buf_addr + (vba - _curr_vba()) * BLOCK_SIZE);
}


Block const &Splitter::src_for_writing_vba(Request_tag tag, Virtual_block_address vba)
{
    Block const *blk_ptr { };
    with_channel<Splitter_channel>(tag, [&] (Splitter_channel &chan) {
        blk_ptr = &chan.src_for_writing_vba(vba); });
    ASSERT(blk_ptr);
    return *blk_ptr;
}


Block &Splitter::dst_for_reading_vba(Request_tag tag, Virtual_block_address vba)
{
    Block *blk_ptr { };
    with_channel<Splitter_channel>(tag, [&] (Splitter_channel &chan) {
        blk_ptr = &chan.dst_for_reading_vba(vba); });
    ASSERT(blk_ptr);
    return *blk_ptr;
}


Splitter::Splitter()
{
    Module_channel_id id { 0 };
    for (Constructible<Channel> &chan : _channels) {
        chan.construct(id++);
        add_channel(*chan);
    }
}


void Splitter::execute(bool &progress)
{
    for_each_channel<Splitter_channel>([&] (Splitter_channel &chan) {
        chan.execute(progress); });
}
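
The two Lookup_buffer accessors above let whichever module ends up handling the client data look the corresponding 4-KiB block buffer up directly at the splitter, keyed by request tag and virtual block address, instead of keeping another copy of the data elsewhere. A minimal consumer sketch; everything except the accessor call is a hypothetical assumption:

    /* hypothetical consumer of the Lookup_buffer interface */
    void copy_plaintext_for_vba(Tresor::Lookup_buffer &splitter,
                                Tresor::Request_tag tag,
                                Tresor::Virtual_block_address vba,
                                Tresor::Block &dst)
    {
        /* fetch the source block for this VBA straight from the splitter */
        Tresor::Block const &src { splitter.src_for_writing_vba(tag, vba) };
        Genode::memcpy(&dst, &src, sizeof(dst));
    }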
repos/gems/src/lib/vfs/tresor/splitter.h (new file, 156 lines)
@@ -0,0 +1,156 @@
/*
 * \brief  Module for splitting unaligned/uneven I/O requests
 * \author Martin Stein
 * \author Josef Soentgen
 * \date   2023-09-11
 */

/*
 * Copyright (C) 2023 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU Affero General Public License version 3.
 */

#ifndef _TRESOR__IO_SPLITTER_H_
#define _TRESOR__IO_SPLITTER_H_

/* tresor includes */
#include <tresor/request_pool.h>

namespace Tresor {

    struct Lookup_buffer : Genode::Interface
    {
        virtual Block const &src_for_writing_vba(Request_tag, Virtual_block_address) = 0;
        virtual Block &dst_for_reading_vba(Request_tag, Virtual_block_address) = 0;
    };

    class Splitter_request;
    class Splitter_channel;
    class Splitter;
}


class Tresor::Splitter_request : public Tresor::Module_request
{
    friend class Splitter_channel;

    public:

        enum Operation { READ, WRITE };

    private:

        Operation      const _op;
        Request_offset const _off;
        Key_id         const _key_id;
        Generation     const _gen;
        Byte_range_ptr const _buf;
        bool                &_success;

        NONCOPYABLE(Splitter_request);

    public:

        Splitter_request(Module_id, Module_channel_id, Operation, bool &, Request_offset, Byte_range_ptr const &, Key_id, Generation);

        static char const *op_to_string(Operation);

        void print(Genode::Output &out) const override { Genode::print(out, op_to_string(_op), " off ", _off, " size ", _buf.num_bytes); }
};


class Tresor::Splitter_channel : public Tresor::Module_channel
{
    private:

        using Request = Splitter_request;

        enum State : State_uint {
            PROTRUDING_FIRST_BLK_WRITTEN, PROTRUDING_LAST_BLK_WRITTEN, PROTRUDING_FIRST_BLK_READ, PROTRUDING_LAST_BLK_READ, INSIDE_BLKS_ACCESSED,
            REQ_SUBMITTED, REQ_GENERATED, REQ_COMPLETE };

        State _state { };
        Request *_req_ptr { };
        addr_t _curr_off { };
        addr_t _curr_buf_addr { };
        Block _blk { };
        Generation _gen { };
        bool _generated_req_success { };

        NONCOPYABLE(Splitter_channel);

        void _generated_req_completed(State_uint) override;

        void _request_submitted(Module_request &) override;

        bool _request_complete() override { return _state == REQ_COMPLETE; }

        Virtual_block_address _curr_vba() const { return (Virtual_block_address)(_curr_off / BLOCK_SIZE); }

        addr_t _curr_buf_off() const
        {
            ASSERT(_curr_off >= _req_ptr->_off && _curr_off <= _req_ptr->_off + _req_ptr->_buf.num_bytes);
            return _curr_off - _req_ptr->_off;
        }

        addr_t _num_remaining_bytes() const
        {
            ASSERT(_curr_off >= _req_ptr->_off && _curr_off <= _req_ptr->_off + _req_ptr->_buf.num_bytes);
            return _req_ptr->_off + _req_ptr->_buf.num_bytes - _curr_off;
        }

        template <typename REQUEST, typename... ARGS>
        void _generate_req(State_uint state, bool &progress, ARGS &&... args)
        {
            _state = REQ_GENERATED;
            generate_req<REQUEST>(state, progress, args..., _generated_req_success);
        }

        void _mark_req_successful(bool &);

        void _advance_curr_off(addr_t, Tresor::Request::Operation, bool &);

        void _read(bool &progress);

        void _write(bool &progress);

        Block &_blk_buf_for_vba(Virtual_block_address);

    public:

        Splitter_channel(Module_channel_id id) : Module_channel { SPLITTER, id } { }

        void execute(bool &progress);

        Block const &src_for_writing_vba(Virtual_block_address vba) { return _blk_buf_for_vba(vba); }

        Block &dst_for_reading_vba(Virtual_block_address vba) { return _blk_buf_for_vba(vba); }
};


class Tresor::Splitter : public Tresor::Module, public Tresor::Lookup_buffer
{
    private:

        using Channel = Splitter_channel;

        Constructible<Channel> _channels[1] { };

        NONCOPYABLE(Splitter);

    public:

        Splitter();

        void execute(bool &) override;

        Block const &src_for_writing_vba(Request_tag, Virtual_block_address) override;

        Block &dst_for_reading_vba(Request_tag, Virtual_block_address) override;
};

#endif /* _TRESOR__IO_SPLITTER_H_ */
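
The request type declared above is all a byte-granular client needs in order to hand an unaligned access to the splitter. A minimal sketch, assuming the caller already owns its module/channel ids, the key id, and the generation; all parameter names are hypothetical:

    /* hypothetical helper; only the Splitter_request constructor is from splitter.h */
    void build_unaligned_write(Tresor::Module_id             src_module,
                               Tresor::Module_channel_id     src_channel,
                               Genode::Byte_range_ptr const &user_buf,
                               Tresor::Request_offset        byte_off,
                               Tresor::Key_id                key_id,
                               Tresor::Generation            gen,
                               bool                         &success)
    {
        /* offset and size may be arbitrary; the splitter maps them to whole blocks */
        Tresor::Splitter_request req {
            src_module, src_channel, Tresor::Splitter_request::WRITE,
            success, byte_off, user_buf, key_id, gen };

        /* submission to the splitter goes through the generic module framework
           and is omitted here */
        (void)req;
    }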
(File diff suppressed because it is too large.)

@@ -12,13 +12,16 @@
 * under the terms of the GNU Affero General Public License version 3.
 */

/* base includes */
#include <base/log.h>
#include <util/string.h>

#include <aes_cbc_4k/aes_cbc_4k.h>

/* tresor includes */
#include <tresor/types.h>
#include <tresor/crypto/interface.h>

/* vfs tresor crypt includes */
#include <aes_cbc_4k/aes_cbc_4k.h>
#include <interface.h>

namespace {

@@ -1,17 +0,0 @@
--
-- \brief  Integration of the Tresor block encryption
-- \author Martin Stein
-- \author Josef Soentgen
-- \date   2020-11-10
--

--
-- Copyright (C) 2020 Genode Labs GmbH
--
-- This file is part of the Genode OS framework, which is distributed
-- under the terms of the GNU Affero General Public License version 3.
--

package Dummy
is
end Dummy;

@@ -12,12 +12,12 @@
 * under the terms of the GNU Affero General Public License version 3.
 */

/* Genode includes */
/* base includes */
#include <base/log.h>
#include <util/string.h>

/* tresor_crypto includes */
#include <tresor/crypto/interface.h>
/* vfs tresor crypt includes */
#include <interface.h>

namespace {

@@ -18,8 +18,8 @@
#include <util/arg_string.h>
#include <util/xml_generator.h>

/* Tresor includes */
#include <tresor/crypto/interface.h>
/* vfs tresor crypto includes */
#include <interface.h>


namespace Vfs_tresor_crypto {

@@ -21,10 +21,8 @@
/* OpenSSL includes */
#include <openssl/sha.h>

/* tresor includes */
#include <tresor/vfs/io_job.h>

/* local includes */
/* vfs tresor trust anchor includes */
#include <io_job.h>
#include <aes_256.h>

enum { PRIVATE_KEY_SIZE = 32 };