mirror of
https://github.com/genodelabs/genode.git
synced 2025-01-18 02:40:08 +00:00
pc/platform: add intel IOMMU support
Add DMA remapping support for Intel devices to the platform driver. genodelabs/genode#5002
This commit is contained in:
parent
70b1ae3d1b
commit
855147a021
@ -47,6 +47,7 @@ class Driver::Common : Device_reporter,
|
||||
|
||||
Constructible<Expanding_reporter> _cfg_reporter { };
|
||||
Constructible<Expanding_reporter> _dev_reporter { };
|
||||
Constructible<Expanding_reporter> _iommu_reporter { };
|
||||
|
||||
void _handle_devices();
|
||||
bool _iommu();
|
||||
@ -97,8 +98,27 @@ void Driver::Common::acquire_io_mmu_devices()
|
||||
|
||||
});
|
||||
|
||||
/* iterate devices and determine address translation mode */
|
||||
bool mpu_present { false };
|
||||
bool device_present { false };
|
||||
_io_mmu_devices.for_each([&] (Io_mmu const & io_mmu) {
|
||||
if (io_mmu.mpu())
|
||||
mpu_present = true;
|
||||
else
|
||||
device_present = true;
|
||||
});
|
||||
|
||||
if (device_present && !mpu_present)
|
||||
_root.enable_dma_remapping();
|
||||
|
||||
bool kernel_iommu_present { false };
|
||||
_io_mmu_devices.for_each([&] (Io_mmu & io_mmu_dev) {
|
||||
if (io_mmu_dev.name() == "kernel_iommu")
|
||||
kernel_iommu_present = true;
|
||||
});
|
||||
|
||||
/* if kernel implements iommu, instantiate Kernel_iommu */
|
||||
if (_iommu())
|
||||
if (_iommu() && !kernel_iommu_present)
|
||||
new (_heap) Kernel_iommu(_env, _io_mmu_devices, "kernel_iommu");
|
||||
}
|
||||
|
||||
@ -128,6 +148,10 @@ void Driver::Common::update_report()
|
||||
if (_dev_reporter.constructed())
|
||||
_dev_reporter->generate([&] (Xml_generator & xml) {
|
||||
_devices.generate(xml); });
|
||||
if (_iommu_reporter.constructed())
|
||||
_iommu_reporter->generate([&] (Xml_generator & xml) {
|
||||
_io_mmu_devices.for_each([&] (Io_mmu & io_mmu) {
|
||||
io_mmu.generate(xml); }); });
|
||||
}
|
||||
|
||||
|
||||
@ -147,6 +171,8 @@ void Driver::Common::handle_config(Xml_node config)
|
||||
_env, "devices", "devices");
|
||||
_cfg_reporter.conditional(node.attribute_value("config", false),
|
||||
_env, "config", "config");
|
||||
_iommu_reporter.conditional(node.attribute_value("iommu", false),
|
||||
_env, "iommu", "iommu");
|
||||
});
|
||||
|
||||
_root.update_policy();
|
||||
|
@ -134,8 +134,7 @@ void Device_pd::enable_pci_device(Io_mem_dataspace_capability const io_mem_cap,
|
||||
}
|
||||
|
||||
|
||||
void Device_pd::disable_pci_device(Io_mem_dataspace_capability const,
|
||||
Pci::Bdf const)
|
||||
void Device_pd::disable_pci_device(Pci::Bdf const)
|
||||
{
|
||||
warning("Cannot unassign PCI device from device PD (not implemented by kernel).");
|
||||
}
|
||||
@ -150,7 +149,7 @@ Device_pd::Device_pd(Env & env,
|
||||
Registry<Dma_buffer> const & buffer_registry)
|
||||
|
||||
:
|
||||
Io_mmu::Domain(io_mmu, md_alloc, buffer_registry),
|
||||
Io_mmu::Domain(io_mmu, md_alloc),
|
||||
_pd(env, Pd_connection::Device_pd()),
|
||||
_address_space(env, _pd, ram_guard, cap_guard)
|
||||
{
|
||||
|
@ -94,8 +94,7 @@ class Driver::Device_pd : public Io_mmu::Domain
|
||||
|
||||
void enable_pci_device(Io_mem_dataspace_capability const,
|
||||
Pci::Bdf const) override;
|
||||
void disable_pci_device(Io_mem_dataspace_capability const,
|
||||
Pci::Bdf const) override;
|
||||
void disable_pci_device(Pci::Bdf const) override;
|
||||
};
|
||||
|
||||
|
||||
@ -113,6 +112,7 @@ class Driver::Kernel_iommu : public Io_mmu
|
||||
|
||||
Driver::Io_mmu::Domain & create_domain(
|
||||
Allocator & md_alloc,
|
||||
Ram_allocator &,
|
||||
Registry<Dma_buffer> const & buffer_registry,
|
||||
Ram_quota_guard & ram_guard,
|
||||
Cap_quota_guard & cap_guard) override
|
||||
|
@ -25,12 +25,10 @@ addr_t Dma_allocator::_alloc_dma_addr(addr_t const phys_addr,
|
||||
{
|
||||
using Alloc_error = Allocator::Alloc_error;
|
||||
|
||||
if (!_iommu) return phys_addr;
|
||||
|
||||
/*
|
||||
* 1:1 mapping (allocate at specified range from DMA memory allocator)
|
||||
*/
|
||||
if (force_phys_addr) {
|
||||
if (force_phys_addr || !_remapping) {
|
||||
return _dma_alloc.alloc_addr(size, phys_addr).convert<addr_t>(
|
||||
[&] (void *) -> addr_t { return phys_addr; },
|
||||
[&] (Alloc_error err) -> addr_t {
|
||||
@ -86,6 +84,9 @@ Dma_buffer & Dma_allocator::alloc_buffer(Ram_dataspace_capability cap,
|
||||
{
|
||||
addr_t dma_addr = _alloc_dma_addr(phys_addr, size, false);
|
||||
|
||||
if (!dma_addr)
|
||||
throw Out_of_virtual_memory();
|
||||
|
||||
try {
|
||||
return * new (_md_alloc) Dma_buffer(_registry, *this, cap, dma_addr, size,
|
||||
phys_addr);
|
||||
@ -101,15 +102,14 @@ Dma_buffer & Dma_allocator::alloc_buffer(Ram_dataspace_capability cap,
|
||||
|
||||
void Dma_allocator::_free_dma_addr(addr_t dma_addr)
|
||||
{
|
||||
if (_iommu)
|
||||
_dma_alloc.free((void *)dma_addr);
|
||||
_dma_alloc.free((void *)dma_addr);
|
||||
}
|
||||
|
||||
|
||||
Dma_allocator::Dma_allocator(Allocator & md_alloc,
|
||||
bool const iommu)
|
||||
bool const remapping)
|
||||
:
|
||||
_md_alloc(md_alloc), _iommu(iommu)
|
||||
_md_alloc(md_alloc), _remapping(remapping)
|
||||
{
|
||||
/* 0x1000 - 4GB */
|
||||
enum { DMA_SIZE = 0xffffe000 };
|
||||
|
@ -51,12 +51,16 @@ struct Driver::Dma_buffer : Registry<Dma_buffer>::Element
|
||||
|
||||
class Driver::Dma_allocator
|
||||
{
|
||||
public:
|
||||
|
||||
struct Out_of_virtual_memory : Exception { };
|
||||
|
||||
private:
|
||||
|
||||
friend class Dma_buffer;
|
||||
|
||||
Allocator & _md_alloc;
|
||||
bool const _iommu;
|
||||
bool _remapping;
|
||||
Allocator_avl _dma_alloc { &_md_alloc };
|
||||
Registry<Dma_buffer> _registry { };
|
||||
|
||||
@ -65,6 +69,9 @@ class Driver::Dma_allocator
|
||||
|
||||
public:
|
||||
|
||||
void enable_remapping() { _remapping = true; }
|
||||
bool remapping() { return _remapping; }
|
||||
|
||||
bool reserve(addr_t phys_addr, size_t size);
|
||||
void unreserve(addr_t phys_addr, size_t size);
|
||||
|
||||
@ -73,7 +80,7 @@ class Driver::Dma_allocator
|
||||
Registry<Dma_buffer> & buffer_registry() { return _registry; }
|
||||
Registry<Dma_buffer> const & buffer_registry() const { return _registry; }
|
||||
|
||||
Dma_allocator(Allocator & md_alloc, bool const iommu);
|
||||
Dma_allocator(Allocator & md_alloc, bool const remapping);
|
||||
};
|
||||
|
||||
#endif /* _SRC__DRIVERS__PLATFORM__DMA_ALLOCATOR_H */
|
||||
|
@ -80,8 +80,7 @@ class Driver::Io_mmu : private Io_mmu_devices::Element
|
||||
/* interface for (un)assigning a pci device */
|
||||
virtual void enable_pci_device(Io_mem_dataspace_capability const,
|
||||
Pci::Bdf const) = 0;
|
||||
virtual void disable_pci_device(Io_mem_dataspace_capability const,
|
||||
Pci::Bdf const) = 0;
|
||||
virtual void disable_pci_device(Pci::Bdf const) = 0;
|
||||
|
||||
/* interface for adding/removing DMA buffers */
|
||||
virtual void add_range(Range const &,
|
||||
@ -116,7 +115,7 @@ class Driver::Io_mmu : private Io_mmu_devices::Element
|
||||
_enable();
|
||||
|
||||
_active_domains++;
|
||||
};
|
||||
}
|
||||
|
||||
void _disable_domain()
|
||||
{
|
||||
@ -125,7 +124,7 @@ class Driver::Io_mmu : private Io_mmu_devices::Element
|
||||
|
||||
if (!_active_domains)
|
||||
_disable();
|
||||
};
|
||||
}
|
||||
|
||||
void _destroy_domains()
|
||||
{
|
||||
@ -141,14 +140,17 @@ class Driver::Io_mmu : private Io_mmu_devices::Element
|
||||
return &domain._io_mmu == this; }
|
||||
|
||||
/* Return true if device requires physical addressing */
|
||||
virtual bool mpu() { return false; };
|
||||
virtual bool mpu() const { return false; }
|
||||
|
||||
/* Create a Io_mmu::Domain object */
|
||||
virtual Domain & create_domain(Allocator &,
|
||||
Ram_allocator &,
|
||||
Registry<Dma_buffer> const &,
|
||||
Ram_quota_guard &,
|
||||
Cap_quota_guard &) = 0;
|
||||
|
||||
virtual void generate(Xml_generator &) { }
|
||||
|
||||
Io_mmu(Io_mmu_devices & io_mmu_devices,
|
||||
Device::Name const & name)
|
||||
: Io_mmu_devices::Element(io_mmu_devices, *this),
|
||||
|
@ -38,10 +38,11 @@ struct Driver::Io_mmu_domain_wrapper
|
||||
|
||||
Io_mmu_domain_wrapper(Io_mmu & io_mmu,
|
||||
Allocator & md_alloc,
|
||||
Ram_allocator & ram_alloc,
|
||||
Registry<Dma_buffer> const & dma_buffers,
|
||||
Ram_quota_guard & ram_guard,
|
||||
Cap_quota_guard & cap_guard)
|
||||
: domain(io_mmu.create_domain(md_alloc, dma_buffers, ram_guard, cap_guard))
|
||||
: domain(io_mmu.create_domain(md_alloc, ram_alloc, dma_buffers, ram_guard, cap_guard))
|
||||
{ }
|
||||
|
||||
~Io_mmu_domain_wrapper() { destroy(domain.md_alloc(), &domain); }
|
||||
@ -54,11 +55,12 @@ struct Driver::Io_mmu_domain : private Registry<Io_mmu_domain>::Element,
|
||||
Io_mmu_domain(Registry<Io_mmu_domain> & registry,
|
||||
Io_mmu & io_mmu,
|
||||
Allocator & md_alloc,
|
||||
Ram_allocator & ram_alloc,
|
||||
Registry<Dma_buffer> const & dma_buffers,
|
||||
Ram_quota_guard & ram_guard,
|
||||
Cap_quota_guard & cap_guard)
|
||||
: Registry<Io_mmu_domain>::Element(registry, *this),
|
||||
Io_mmu_domain_wrapper(io_mmu, md_alloc, dma_buffers, ram_guard, cap_guard)
|
||||
Io_mmu_domain_wrapper(io_mmu, md_alloc, ram_alloc, dma_buffers, ram_guard, cap_guard)
|
||||
{ }
|
||||
};
|
||||
|
||||
@ -73,11 +75,12 @@ class Driver::Io_mmu_domain_registry : public Registry<Io_mmu_domain>
|
||||
|
||||
void default_domain(Io_mmu & io_mmu,
|
||||
Allocator & md_alloc,
|
||||
Ram_allocator & ram_alloc,
|
||||
Registry<Dma_buffer> const & dma_buffers,
|
||||
Ram_quota_guard & ram_quota_guard,
|
||||
Cap_quota_guard & cap_quota_guard)
|
||||
{
|
||||
_default_domain.construct(io_mmu, md_alloc, dma_buffers,
|
||||
_default_domain.construct(io_mmu, md_alloc, ram_alloc, dma_buffers,
|
||||
ram_quota_guard, cap_quota_guard);
|
||||
}
|
||||
|
||||
|
@ -48,7 +48,7 @@ Driver::Session_component * Driver::Root::_create_session(const char *args)
|
||||
session_diag_from_args(args),
|
||||
policy.attribute_value("info", false),
|
||||
policy.attribute_value("version", Version()),
|
||||
_iommu);
|
||||
_io_mmu_present || _kernel_iommu, _kernel_iommu);
|
||||
} catch (Session_policy::No_policy_defined) {
|
||||
error("Invalid session request, no matching policy for ",
|
||||
"'", label_from_args(args).string(), "'");
|
||||
@ -74,8 +74,8 @@ Driver::Root::Root(Env & env,
|
||||
Attached_rom_dataspace const & config,
|
||||
Device_model & devices,
|
||||
Io_mmu_devices & io_mmu_devices,
|
||||
bool const iommu)
|
||||
bool const kernel_iommu)
|
||||
: Root_component<Session_component>(env.ep(), sliced_heap),
|
||||
_env(env), _config(config), _devices(devices),
|
||||
_io_mmu_devices(io_mmu_devices), _iommu(iommu)
|
||||
_io_mmu_devices(io_mmu_devices), _kernel_iommu(kernel_iommu)
|
||||
{ }
|
||||
|
@ -32,10 +32,24 @@ class Driver::Root : public Root_component<Session_component>
|
||||
Attached_rom_dataspace const & config,
|
||||
Device_model & devices,
|
||||
Io_mmu_devices & io_mmu_devices,
|
||||
bool const iommu);
|
||||
bool const kernel_iommu);
|
||||
|
||||
void update_policy();
|
||||
|
||||
void enable_dma_remapping()
|
||||
{
|
||||
_io_mmu_present = true;
|
||||
|
||||
/**
|
||||
* IOMMU devices may appear after the first sessions have been
|
||||
* created. We therefore need to propagate this to the already
|
||||
* created sessions.
|
||||
*/
|
||||
_sessions.for_each([&] (Session_component & sess) {
|
||||
sess.enable_dma_remapping();
|
||||
});
|
||||
}
|
||||
|
||||
private:
|
||||
|
||||
Session_component * _create_session(const char * args) override;
|
||||
@ -46,7 +60,8 @@ class Driver::Root : public Root_component<Session_component>
|
||||
Attached_rom_dataspace const & _config;
|
||||
Device_model & _devices;
|
||||
Io_mmu_devices & _io_mmu_devices;
|
||||
bool const _iommu;
|
||||
bool _io_mmu_present { false };
|
||||
bool const _kernel_iommu;
|
||||
Registry<Session_component> _sessions {};
|
||||
};
|
||||
|
||||
|
@ -36,13 +36,14 @@ Session_component::_acquire(Device & device)
|
||||
[&] () {
|
||||
_io_mmu_devices.for_each([&] (Io_mmu & io_mmu_dev) {
|
||||
if (io_mmu_dev.name() == io_mmu.name) {
|
||||
if (io_mmu_dev.mpu() && _iommu)
|
||||
if (io_mmu_dev.mpu() && _dma_allocator.remapping())
|
||||
error("Unable to create domain for MPU device ",
|
||||
io_mmu_dev.name(), " for an IOMMU-enabled session.");
|
||||
else
|
||||
new (heap()) Io_mmu_domain(_domain_registry,
|
||||
io_mmu_dev,
|
||||
heap(),
|
||||
_env_ram,
|
||||
_dma_allocator.buffer_registry(),
|
||||
_ram_quota_guard(),
|
||||
_cap_quota_guard());
|
||||
@ -366,7 +367,7 @@ Session_component::alloc_dma_buffer(size_t const size, Cache cache)
|
||||
} catch (Out_of_caps) {
|
||||
_env_ram.free(ram_cap);
|
||||
throw;
|
||||
}
|
||||
} catch (Dma_allocator::Out_of_virtual_memory) { }
|
||||
|
||||
return ram_cap;
|
||||
}
|
||||
@ -407,14 +408,15 @@ Session_component::Session_component(Env & env,
|
||||
Diag const & diag,
|
||||
bool const info,
|
||||
Policy_version const version,
|
||||
bool const iommu)
|
||||
bool const dma_remapping,
|
||||
bool const kernel_iommu)
|
||||
:
|
||||
Session_object<Platform::Session>(env.ep(), resources, label, diag),
|
||||
Session_registry::Element(registry, *this),
|
||||
Dynamic_rom_session::Xml_producer("devices"),
|
||||
_env(env), _config(config), _devices(devices),
|
||||
_io_mmu_devices(io_mmu_devices), _info(info), _version(version),
|
||||
_iommu(iommu)
|
||||
_dma_allocator(_md_alloc, dma_remapping)
|
||||
{
|
||||
/*
|
||||
* FIXME: As the ROM session does not propagate Out_of_*
|
||||
@ -433,11 +435,12 @@ Session_component::Session_component(Env & env,
|
||||
* there is a kernel_iommu used by each device if _iommu is set. We therefore
|
||||
* construct a corresponding domain object at session construction.
|
||||
*/
|
||||
if (_iommu)
|
||||
if (kernel_iommu)
|
||||
_io_mmu_devices.for_each([&] (Io_mmu & io_mmu_dev) {
|
||||
if (io_mmu_dev.name() == "kernel_iommu") {
|
||||
_domain_registry.default_domain(io_mmu_dev,
|
||||
heap(),
|
||||
_env_ram,
|
||||
_dma_allocator.buffer_registry(),
|
||||
_ram_quota_guard(),
|
||||
_cap_quota_guard());
|
||||
|
@ -57,7 +57,8 @@ class Driver::Session_component
|
||||
Diag const & diag,
|
||||
bool const info,
|
||||
Policy_version const version,
|
||||
bool const iommu);
|
||||
bool const dma_remapping,
|
||||
bool const kernel_iommu);
|
||||
|
||||
~Session_component();
|
||||
|
||||
@ -65,6 +66,8 @@ class Driver::Session_component
|
||||
Io_mmu_domain_registry & domain_registry();
|
||||
Dma_allocator & dma_allocator();
|
||||
|
||||
void enable_dma_remapping() { _dma_allocator.enable_remapping(); }
|
||||
|
||||
bool matches(Device const &) const;
|
||||
|
||||
Ram_quota_guard & ram_quota_guard() { return _ram_quota_guard(); }
|
||||
@ -116,8 +119,7 @@ class Driver::Session_component
|
||||
_env.rm(), *this };
|
||||
bool _info;
|
||||
Policy_version _version;
|
||||
bool const _iommu;
|
||||
Dma_allocator _dma_allocator { _md_alloc, _iommu };
|
||||
Dma_allocator _dma_allocator;
|
||||
|
||||
Device_capability _acquire(Device & device);
|
||||
void _release_device(Device_component & dc);
|
||||
|
24
repos/pc/src/drivers/platform/pc/clflush.h
Normal file
24
repos/pc/src/drivers/platform/pc/clflush.h
Normal file
@ -0,0 +1,24 @@
|
||||
/*
|
||||
* \brief Helper for flushing translation-table entries from cache
|
||||
* \author Johannes Schlatow
|
||||
* \date 2023-09-20
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2023 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _SRC__DRIVERS__PLATFORM__CLFLUSH_H_
|
||||
#define _SRC__DRIVERS__PLATFORM__CLFLUSH_H_
|
||||
|
||||
namespace Utils {
|
||||
inline void clflush(volatile void *addr)
|
||||
{
|
||||
asm volatile("clflush %0" : "+m" (*(volatile char *)addr));
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* _SRC__DRIVERS__PLATFORM__CLFLUSH_H_ */
|
@ -0,0 +1,244 @@
|
||||
/*
|
||||
* \brief Expanding page table allocator
|
||||
* \author Johannes Schlatow
|
||||
* \date 2023-10-18
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2023 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _SRC__DRIVERS__PLATFORM__PC__EXPANDING_PAGE_TABLE_ALLOCATOR_H_
|
||||
#define _SRC__DRIVERS__PLATFORM__PC__EXPANDING_PAGE_TABLE_ALLOCATOR_H_
|
||||
|
||||
/* Genode includes */
|
||||
#include <base/allocator_avl.h>
|
||||
#include <base/attached_ram_dataspace.h>
|
||||
#include <pd_session/pd_session.h>
|
||||
#include <util/avl_tree.h>
|
||||
|
||||
namespace Driver {
|
||||
using namespace Genode;
|
||||
|
||||
template <size_t TABLE_SIZE>
|
||||
class Expanding_page_table_allocator;
|
||||
}
|
||||
|
||||
|
||||
template <Genode::size_t TABLE_SIZE>
|
||||
class Driver::Expanding_page_table_allocator
|
||||
{
|
||||
public:
|
||||
|
||||
struct Alloc_failed : Exception { };
|
||||
|
||||
private:
|
||||
|
||||
using Alloc_result = Allocator::Alloc_result;
|
||||
using Alloc_error = Allocator::Alloc_error;
|
||||
|
||||
enum { MAX_CHUNK_SIZE = 2*1024*1024 };
|
||||
|
||||
class Backing_store : Noncopyable
|
||||
{
|
||||
public:
|
||||
|
||||
class Element : public Avl_node<Element>
|
||||
{
|
||||
private:
|
||||
|
||||
Range_allocator & _range_alloc;
|
||||
Attached_ram_dataspace _dataspace;
|
||||
addr_t _virt_addr;
|
||||
addr_t _phys_addr;
|
||||
|
||||
friend class Backing_store;
|
||||
|
||||
Element * _matching_sub_tree(addr_t pa)
|
||||
{
|
||||
typename Avl_node<Element>::Side side = (pa > _phys_addr);
|
||||
return Avl_node<Element>::child(side);
|
||||
}
|
||||
|
||||
public:
|
||||
|
||||
Element(Range_allocator & range_alloc,
|
||||
Ram_allocator & ram_alloc,
|
||||
Region_map & rm,
|
||||
Pd_session & pd,
|
||||
size_t size)
|
||||
: _range_alloc(range_alloc),
|
||||
_dataspace(ram_alloc, rm, size, Genode::CACHED),
|
||||
_virt_addr((addr_t)_dataspace.local_addr<void>()),
|
||||
_phys_addr(pd.dma_addr(_dataspace.cap()))
|
||||
{
|
||||
_range_alloc.add_range(_phys_addr, size);
|
||||
}
|
||||
|
||||
~Element() {
|
||||
_range_alloc.remove_range(_phys_addr, _dataspace.size()); }
|
||||
|
||||
bool matches(addr_t pa)
|
||||
{
|
||||
return pa >= _phys_addr &&
|
||||
pa < _phys_addr + _dataspace.size();
|
||||
}
|
||||
|
||||
addr_t virt_addr(addr_t phys_addr) {
|
||||
return _virt_addr + (phys_addr - _phys_addr); }
|
||||
|
||||
/*
|
||||
* Avl_node interface
|
||||
*/
|
||||
bool higher(Element * other) {
|
||||
return other->_phys_addr > _phys_addr; }
|
||||
};
|
||||
|
||||
private:
|
||||
|
||||
Avl_tree<Element> _tree { };
|
||||
Env & _env;
|
||||
Allocator & _md_alloc;
|
||||
Ram_allocator & _ram_alloc;
|
||||
Range_allocator & _range_alloc;
|
||||
size_t _chunk_size;
|
||||
|
||||
public:
|
||||
|
||||
Backing_store(Env & env,
|
||||
Allocator & md_alloc,
|
||||
Ram_allocator & ram_alloc,
|
||||
Range_allocator & range_alloc,
|
||||
size_t start_size)
|
||||
: _env(env), _md_alloc(md_alloc), _ram_alloc(ram_alloc),
|
||||
_range_alloc(range_alloc), _chunk_size(start_size)
|
||||
{ }
|
||||
|
||||
~Backing_store();
|
||||
|
||||
/* double backing store size (until MAX_CHUNK_SIZE is reached) */
|
||||
void grow();
|
||||
|
||||
template <typename FN1, typename FN2>
|
||||
void with_virt_addr(addr_t pa, FN1 && match_fn, FN2 && no_match_fn)
|
||||
{
|
||||
Element * e = _tree.first();
|
||||
|
||||
for (;;) {
|
||||
if (!e) break;
|
||||
|
||||
if (e->matches(pa)) {
|
||||
match_fn(e->virt_addr(pa));
|
||||
return;
|
||||
}
|
||||
|
||||
e = e->_matching_sub_tree(pa);
|
||||
}
|
||||
|
||||
no_match_fn();
|
||||
};
|
||||
};
|
||||
|
||||
Allocator_avl _allocator;
|
||||
Backing_store _backing_store;
|
||||
|
||||
addr_t _alloc();
|
||||
|
||||
public:
|
||||
|
||||
Expanding_page_table_allocator(Genode::Env & env,
|
||||
Allocator & md_alloc,
|
||||
Ram_allocator & ram_alloc,
|
||||
size_t start_count)
|
||||
: _allocator(&md_alloc),
|
||||
_backing_store(env, md_alloc, ram_alloc, _allocator, start_count*TABLE_SIZE)
|
||||
{ }
|
||||
|
||||
template <typename TABLE, typename FN1, typename FN2>
|
||||
void with_table(addr_t phys_addr, FN1 && match_fn, FN2 no_match_fn)
|
||||
{
|
||||
static_assert((sizeof(TABLE) == TABLE_SIZE), "unexpected size");
|
||||
|
||||
_backing_store.with_virt_addr(phys_addr,
|
||||
[&] (addr_t va) {
|
||||
match_fn(*(TABLE*)va); },
|
||||
no_match_fn);
|
||||
}
|
||||
|
||||
template <typename TABLE> addr_t construct()
|
||||
{
|
||||
static_assert((sizeof(TABLE) == TABLE_SIZE), "unexpected size");
|
||||
|
||||
addr_t phys_addr = _alloc();
|
||||
_backing_store.with_virt_addr(phys_addr,
|
||||
[&] (addr_t va) {
|
||||
construct_at<TABLE>((void*)va); },
|
||||
[&] () {});
|
||||
|
||||
return phys_addr;
|
||||
}
|
||||
|
||||
template <typename TABLE> void destruct(addr_t phys_addr)
|
||||
{
|
||||
static_assert((sizeof(TABLE) == TABLE_SIZE), "unexpected size");
|
||||
|
||||
with_table<TABLE>(phys_addr,
|
||||
[&] (TABLE & table) {
|
||||
table.~TABLE();
|
||||
_allocator.free((void*)phys_addr);
|
||||
},
|
||||
[&] () {
|
||||
error("Trying to destruct foreign table at ", Hex(phys_addr));
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
template <Genode::size_t TABLE_SIZE>
|
||||
Driver::Expanding_page_table_allocator<TABLE_SIZE>::Backing_store::~Backing_store()
|
||||
{
|
||||
while (_tree.first()) {
|
||||
Element * e = _tree.first();
|
||||
_tree.remove(e);
|
||||
destroy(_md_alloc, e);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
template <Genode::size_t TABLE_SIZE>
|
||||
void Driver::Expanding_page_table_allocator<TABLE_SIZE>::Backing_store::grow()
|
||||
{
|
||||
Element * e = new (_md_alloc) Element(_range_alloc, _ram_alloc,
|
||||
_env.rm(), _env.pd(), _chunk_size);
|
||||
|
||||
_tree.insert(e);
|
||||
|
||||
/* double _chunk_size */
|
||||
_chunk_size = Genode::min(_chunk_size << 1, (size_t)MAX_CHUNK_SIZE);
|
||||
}
|
||||
|
||||
|
||||
template <Genode::size_t TABLE_SIZE>
|
||||
Genode::addr_t Driver::Expanding_page_table_allocator<TABLE_SIZE>::_alloc()
|
||||
{
|
||||
const unsigned align = (unsigned)Genode::log2(TABLE_SIZE);
|
||||
|
||||
Alloc_result result = _allocator.alloc_aligned(TABLE_SIZE, align);
|
||||
|
||||
if (result.failed()) {
|
||||
|
||||
_backing_store.grow();
|
||||
|
||||
/* retry allocation */
|
||||
result = _allocator.alloc_aligned(TABLE_SIZE, align);
|
||||
}
|
||||
|
||||
return result.convert<addr_t>(
|
||||
[&] (void * ptr) -> addr_t { return (addr_t)ptr; },
|
||||
[&] (Alloc_error) -> addr_t { throw Alloc_failed(); });
|
||||
}
|
||||
|
||||
#endif /* _SRC__DRIVERS__PLATFORM__PC__EXPANDING_PAGE_TABLE_ALLOCATOR_H_ */
|
74
repos/pc/src/drivers/platform/pc/hw/page_flags.h
Normal file
74
repos/pc/src/drivers/platform/pc/hw/page_flags.h
Normal file
@ -0,0 +1,74 @@
|
||||
/*
|
||||
* \brief Generic page flags
|
||||
* \author Stefan Kalkowski
|
||||
* \date 2014-02-24
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2014-2017 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _SRC__DRIVERS__PLATFORM__PC__HW__PAGE_FLAGS_H_
|
||||
#define _SRC__DRIVERS__PLATFORM__PC__HW__PAGE_FLAGS_H_
|
||||
|
||||
#include <base/cache.h>
|
||||
#include <base/output.h>
|
||||
|
||||
namespace Hw {
|
||||
|
||||
enum Writeable { RO, RW };
|
||||
enum Executeable { NO_EXEC, EXEC };
|
||||
enum Privileged { USER, KERN };
|
||||
enum Global { NO_GLOBAL, GLOBAL };
|
||||
enum Type { RAM, DEVICE };
|
||||
|
||||
struct Page_flags;
|
||||
}
|
||||
|
||||
|
||||
struct Hw::Page_flags
|
||||
{
|
||||
Writeable writeable;
|
||||
Executeable executable;
|
||||
Privileged privileged;
|
||||
Global global;
|
||||
Type type;
|
||||
Genode::Cache cacheable;
|
||||
|
||||
void print(Genode::Output & out) const
|
||||
{
|
||||
using Genode::print;
|
||||
using namespace Genode;
|
||||
|
||||
print(out, (writeable == RW) ? "writeable, " : "readonly, ",
|
||||
(executable ==EXEC) ? "exec, " : "noexec, ");
|
||||
if (privileged == KERN) print(out, "privileged, ");
|
||||
if (global == GLOBAL) print(out, "global, ");
|
||||
if (type == DEVICE) print(out, "iomem, ");
|
||||
switch (cacheable) {
|
||||
case UNCACHED: print(out, "uncached"); break;
|
||||
case CACHED: print(out, "cached"); break;
|
||||
case WRITE_COMBINED: print(out, "write-combined"); break;
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
namespace Hw {
|
||||
|
||||
static constexpr Page_flags PAGE_FLAGS_KERN_IO
|
||||
{ RW, NO_EXEC, KERN, GLOBAL, DEVICE, Genode::UNCACHED };
|
||||
static constexpr Page_flags PAGE_FLAGS_KERN_DATA
|
||||
{ RW, EXEC, KERN, GLOBAL, RAM, Genode::CACHED };
|
||||
static constexpr Page_flags PAGE_FLAGS_KERN_TEXT
|
||||
{ RW, EXEC, KERN, GLOBAL, RAM, Genode::CACHED };
|
||||
static constexpr Page_flags PAGE_FLAGS_KERN_EXCEP
|
||||
{ RW, EXEC, KERN, GLOBAL, RAM, Genode::CACHED };
|
||||
static constexpr Page_flags PAGE_FLAGS_UTCB
|
||||
{ RW, NO_EXEC, USER, NO_GLOBAL, RAM, Genode::CACHED };
|
||||
}
|
||||
|
||||
#endif /* _SRC__DRIVERS__PLATFORM__PC__HW__PAGE_FLAGS_H_ */
|
148
repos/pc/src/drivers/platform/pc/hw/page_table_allocator.h
Normal file
148
repos/pc/src/drivers/platform/pc/hw/page_table_allocator.h
Normal file
@ -0,0 +1,148 @@
|
||||
/*
|
||||
* \brief Page table allocator
|
||||
* \author Stefan Kalkowski
|
||||
* \author Johannes Schlatow
|
||||
* \date 2015-06-10
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2015-2023 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _SRC__DRIVERS__PLATFORM__PC__HW__PAGE_TABLE_ALLOCATOR_H_
|
||||
#define _SRC__DRIVERS__PLATFORM__PC__HW__PAGE_TABLE_ALLOCATOR_H_
|
||||
|
||||
#include <util/bit_allocator.h>
|
||||
#include <util/construct_at.h>
|
||||
|
||||
namespace Hw {
|
||||
template <Genode::size_t TABLE_SIZE> class Page_table_allocator;
|
||||
struct Out_of_tables {};
|
||||
}
|
||||
|
||||
template <Genode::size_t TABLE_SIZE>
|
||||
class Hw::Page_table_allocator
|
||||
{
|
||||
protected:
|
||||
|
||||
using addr_t = Genode::addr_t;
|
||||
using size_t = Genode::size_t;
|
||||
|
||||
addr_t const _virt_addr;
|
||||
addr_t const _phys_addr;
|
||||
size_t const _size;
|
||||
|
||||
template <typename TABLE> addr_t _offset(TABLE & table) {
|
||||
return (addr_t)&table - _virt_addr; }
|
||||
|
||||
void * _index(unsigned idx) {
|
||||
return (void*)(_virt_addr + TABLE_SIZE*idx); }
|
||||
|
||||
virtual unsigned _alloc() = 0;
|
||||
virtual void _free(unsigned idx) = 0;
|
||||
|
||||
public:
|
||||
|
||||
template <unsigned COUNT> class Array;
|
||||
|
||||
Page_table_allocator(addr_t virt_addr, addr_t phys_addr, size_t size)
|
||||
: _virt_addr(virt_addr), _phys_addr(phys_addr), _size(size) { }
|
||||
|
||||
virtual ~Page_table_allocator() { }
|
||||
|
||||
template <typename TABLE, typename FN1, typename FN2>
|
||||
void with_table(addr_t phys_addr, FN1 && match_fn, FN2 no_match_fn)
|
||||
{
|
||||
static_assert((sizeof(TABLE) == TABLE_SIZE), "unexpected size");
|
||||
|
||||
if (phys_addr >= _phys_addr && phys_addr < _phys_addr + _size)
|
||||
match_fn(*(TABLE*)(_virt_addr + (phys_addr - _phys_addr)));
|
||||
else
|
||||
no_match_fn();
|
||||
}
|
||||
|
||||
template <typename TABLE> addr_t construct()
|
||||
{
|
||||
static_assert((sizeof(TABLE) == TABLE_SIZE), "unexpected size");
|
||||
TABLE & table = *Genode::construct_at<TABLE>(_index(_alloc()));
|
||||
return _offset(table) + _phys_addr;
|
||||
}
|
||||
|
||||
template <typename TABLE> void destruct(addr_t phys_addr)
|
||||
{
|
||||
static_assert((sizeof(TABLE) == TABLE_SIZE), "unexpected size");
|
||||
|
||||
with_table<TABLE>(phys_addr,
|
||||
[&] (TABLE & table) {
|
||||
table.~TABLE();
|
||||
_free((unsigned)(_offset(table) / sizeof(TABLE)));
|
||||
},
|
||||
[&] () {
|
||||
Genode::error("Trying to destruct foreign table at ", Genode::Hex(phys_addr));
|
||||
});
|
||||
}
|
||||
|
||||
size_t size() const { return _size; }
|
||||
};
|
||||
|
||||
|
||||
template <Genode::size_t TABLE_SIZE>
|
||||
template <unsigned COUNT>
|
||||
class Hw::Page_table_allocator<TABLE_SIZE>::Array
|
||||
{
|
||||
public:
|
||||
|
||||
class Allocator;
|
||||
|
||||
private:
|
||||
|
||||
struct Table { Genode::uint8_t data[TABLE_SIZE]; };
|
||||
|
||||
Table _tables[COUNT];
|
||||
Allocator _alloc;
|
||||
|
||||
public:
|
||||
|
||||
Array() : _alloc((Table*)&_tables, (addr_t)&_tables) {}
|
||||
|
||||
template <typename T>
|
||||
explicit Array(T phys_addr)
|
||||
: _alloc(_tables, phys_addr((void*)_tables), COUNT * TABLE_SIZE) { }
|
||||
|
||||
Page_table_allocator<TABLE_SIZE> & alloc() { return _alloc; }
|
||||
};
|
||||
|
||||
|
||||
template <Genode::size_t TABLE_SIZE>
|
||||
template <unsigned COUNT>
|
||||
class Hw::Page_table_allocator<TABLE_SIZE>::Array<COUNT>::Allocator
|
||||
:
|
||||
public Hw::Page_table_allocator<TABLE_SIZE>
|
||||
{
|
||||
private:
|
||||
|
||||
using Bit_allocator = Genode::Bit_allocator<COUNT>;
|
||||
using Array = Page_table_allocator<TABLE_SIZE>::Array<COUNT>;
|
||||
|
||||
Bit_allocator _free_tables { };
|
||||
|
||||
unsigned _alloc() override
|
||||
{
|
||||
try {
|
||||
return (unsigned)_free_tables.alloc();
|
||||
} catch (typename Bit_allocator::Out_of_indices&) {}
|
||||
throw Out_of_tables();
|
||||
}
|
||||
|
||||
void _free(unsigned idx) override { _free_tables.free(idx); }
|
||||
|
||||
public:
|
||||
|
||||
Allocator(Table * tables, addr_t phys_addr, size_t size)
|
||||
: Page_table_allocator((addr_t)tables, phys_addr, size) {}
|
||||
};
|
||||
|
||||
#endif /* _SRC__DRIVERS__PLATFORM__PC__HW__PAGE_TABLE_ALLOCATOR_H_ */
|
42
repos/pc/src/drivers/platform/pc/hw/util.h
Normal file
42
repos/pc/src/drivers/platform/pc/hw/util.h
Normal file
@ -0,0 +1,42 @@
|
||||
/*
|
||||
* \brief Common utilities
|
||||
* \author Martin Stein
|
||||
* \author Stefan Kalkowski
|
||||
* \date 2012-01-02
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2012-2017 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _SRC__LIB__HW__UTIL_H_
|
||||
#define _SRC__LIB__HW__UTIL_H_
|
||||
|
||||
namespace Hw {
|
||||
|
||||
using Genode::addr_t;
|
||||
using Genode::size_t;
|
||||
|
||||
/**
|
||||
* Return an address rounded down to a specific alignment
|
||||
*
|
||||
* \param addr original address
|
||||
* \param alignm_log2 log2 of the required alignment
|
||||
*/
|
||||
constexpr addr_t trunc(addr_t addr, addr_t alignm_log2) {
|
||||
return (addr >> alignm_log2) << alignm_log2; }
|
||||
|
||||
/**
|
||||
* Return wether a pointer fullfills an alignment
|
||||
*
|
||||
* \param p pointer
|
||||
* \param alignm_log2 log2 of the required alignment
|
||||
*/
|
||||
inline bool aligned(void * const p, addr_t alignm_log2) {
|
||||
return (addr_t)p == trunc((addr_t)p, alignm_log2); }
|
||||
}
|
||||
|
||||
#endif /* _SRC__LIB__HW__UTIL_H_ */
|
70
repos/pc/src/drivers/platform/pc/intel/context_table.cc
Normal file
70
repos/pc/src/drivers/platform/pc/intel/context_table.cc
Normal file
@ -0,0 +1,70 @@
|
||||
/*
|
||||
* \brief Intel IOMMU Context Table implementation
|
||||
* \author Johannes Schlatow
|
||||
* \date 2023-08-31
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2023 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
/* local includes */
|
||||
#include <intel/context_table.h>
|
||||
#include <intel/report_helper.h>
|
||||
#include <intel/page_table.h>
|
||||
|
||||
/* emit 'value' as hexadecimal attribute 'name' of the current XML node */
static void attribute_hex(Genode::Xml_generator & xml, char const * name,
                          unsigned long long value)
{
	Genode::String<32> const hex_string { Genode::Hex(value) };

	xml.attribute(name, hex_string);
}
|
||||
|
||||
|
||||
/**
 * Dump all present context entries of this table as XML, including the
 * second-stage translation table each entry points to.
 *
 * \param xml            generator for the report
 * \param env            environment used for accessing the stage-2 tables
 * \param report_helper  resolves physical table addresses to table objects
 */
void Intel::Context_table::generate(Xml_generator & xml,
                                    Env & env,
                                    Report_helper & report_helper)
{
	for_each(0, [&] (Pci::rid_t id) {
		/* skip entries that have no present bit set */
		if (!present(id))
			return;

		xml.node("context_entry", [&] () {
			addr_t stage2_addr = stage2_pointer(id);

			xml.attribute("device", Pci::Bdf::Routing_id::Device::get(id));
			xml.attribute("function", Pci::Bdf::Routing_id::Function::get(id));
			attribute_hex(xml, "hi", hi(id));
			attribute_hex(xml, "lo", lo(id));
			attribute_hex(xml, "domain", domain(id));
			attribute_hex(xml, "agaw", agaw(id));
			attribute_hex(xml, "type", translation_type(id));
			attribute_hex(xml, "stage2_table", stage2_addr);
			xml.attribute("fault_processing", !fault_processing_disabled(id));

			/* pick the page-table type matching the entry's AGAW field */
			switch (agaw(id)) {
			case Hi::Address_width::AGAW_3_LEVEL:
				using Table3 = Intel::Level_3_translation_table;

				/* dump stage2 table */
				report_helper.with_table<Table3>(stage2_addr,
					[&] (Table3 & stage2_table) {
						stage2_table.generate(xml, env, report_helper); });
				break;
			case Hi::Address_width::AGAW_4_LEVEL:
				using Table4 = Intel::Level_4_translation_table;

				/* dump stage2 table */
				report_helper.with_table<Table4>(stage2_addr,
					[&] (Table4 & stage2_table) {
						stage2_table.generate(xml, env, report_helper); });
				break;
			default:
				/* 5-level tables are not supported by this driver */
				xml.node("unsupported-agaw-error", [&] () {});
			}
		});
	});
}
|
||||
|
171
repos/pc/src/drivers/platform/pc/intel/context_table.h
Normal file
171
repos/pc/src/drivers/platform/pc/intel/context_table.h
Normal file
@ -0,0 +1,171 @@
|
||||
/*
|
||||
* \brief Intel IOMMU Context Table implementation
|
||||
* \author Johannes Schlatow
|
||||
* \date 2023-08-31
|
||||
*
|
||||
* The context table is a page-aligned 4KB size structure. It is indexed by the
|
||||
* lower 8bit of the resource id (see 9.3).
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2023 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _SRC__DRIVERS__PLATFORM__INTEL__CONTEXT_TABLE_H_
|
||||
#define _SRC__DRIVERS__PLATFORM__INTEL__CONTEXT_TABLE_H_
|
||||
|
||||
/* Genode includes */
|
||||
#include <base/env.h>
|
||||
#include <util/register.h>
|
||||
#include <util/xml_generator.h>
|
||||
#include <pci/types.h>
|
||||
|
||||
/* local includes */
|
||||
#include <clflush.h>
|
||||
|
||||
namespace Intel {
|
||||
using namespace Genode;
|
||||
|
||||
class Context_table;
|
||||
|
||||
/* forward declaration */
|
||||
class Report_helper;
|
||||
}
|
||||
|
||||
|
||||
class Intel::Context_table
|
||||
{
|
||||
private:
|
||||
|
||||
struct Hi : Genode::Register<64>
|
||||
{
|
||||
/* set according to SAGAW of Capability register */
|
||||
struct Address_width : Bitfield< 0, 3>
|
||||
{
|
||||
enum {
|
||||
AGAW_3_LEVEL = 0x1,
|
||||
AGAW_4_LEVEL = 0x2,
|
||||
AGAW_5_LEVEL = 0x3
|
||||
};
|
||||
};
|
||||
struct Domain : Bitfield< 8,16> { };
|
||||
};
|
||||
|
||||
struct Lo : Genode::Register<64>
|
||||
{
|
||||
struct Present : Bitfield< 0, 1> { };
|
||||
struct Ignore_faults : Bitfield< 1, 1> { };
|
||||
|
||||
/* should be 0 */
|
||||
struct Type : Bitfield< 2, 2> { };
|
||||
struct Stage2_pointer : Bitfield<12,52> { };
|
||||
};
|
||||
|
||||
typename Lo::access_t _entries[512];
|
||||
|
||||
static size_t _lo_index(Pci::rid_t rid) { return 2*(rid & 0xff); }
|
||||
static size_t _hi_index(Pci::rid_t rid) { return 2*(rid & 0xff) + 1; }
|
||||
|
||||
public:
|
||||
|
||||
template <typename FN>
|
||||
static void for_each(Pci::rid_t start, FN && fn)
|
||||
{
|
||||
Pci::rid_t rid { start };
|
||||
do {
|
||||
fn(rid);
|
||||
rid++;
|
||||
} while (rid != start + 0xFF);
|
||||
}
|
||||
|
||||
Lo::access_t lo(Pci::rid_t rid) {
|
||||
return _entries[_lo_index(rid)]; }
|
||||
|
||||
Hi::access_t hi(Pci::rid_t rid) {
|
||||
return _entries[_hi_index(rid)]; }
|
||||
|
||||
bool present(Pci::rid_t rid) {
|
||||
return Lo::Present::get(lo(rid)); }
|
||||
|
||||
uint16_t domain(Pci::rid_t rid) {
|
||||
return Hi::Domain::get(hi(rid)); }
|
||||
|
||||
uint8_t agaw(Pci::rid_t rid) {
|
||||
return Hi::Address_width::get(hi(rid)); }
|
||||
|
||||
uint8_t translation_type(Pci::rid_t rid) {
|
||||
return Lo::Type::get(lo(rid)); }
|
||||
|
||||
bool fault_processing_disabled(Pci::rid_t rid) {
|
||||
return Lo::Ignore_faults::get(lo(rid)); }
|
||||
|
||||
addr_t stage2_pointer(Pci::rid_t rid) {
|
||||
return Lo::Stage2_pointer::masked(lo(rid)); }
|
||||
|
||||
template <unsigned ADDRESS_WIDTH>
|
||||
void insert(Pci::rid_t rid, addr_t phys_addr, uint16_t domain_id,
|
||||
bool flush)
|
||||
{
|
||||
static_assert(ADDRESS_WIDTH == 39 ||
|
||||
ADDRESS_WIDTH == 48 ||
|
||||
ADDRESS_WIDTH == 57, "unsupported address width");
|
||||
|
||||
unsigned agaw;
|
||||
switch (ADDRESS_WIDTH) {
|
||||
case 39:
|
||||
agaw = Hi::Address_width::AGAW_3_LEVEL;
|
||||
break;
|
||||
case 48:
|
||||
agaw = Hi::Address_width::AGAW_4_LEVEL;
|
||||
break;
|
||||
case 57:
|
||||
agaw = Hi::Address_width::AGAW_5_LEVEL;
|
||||
break;
|
||||
default:
|
||||
error("unsupported address width");
|
||||
return;
|
||||
}
|
||||
|
||||
Hi::access_t hi_val = Hi::Address_width::bits(agaw) |
|
||||
Hi::Domain::bits(domain_id);
|
||||
_entries[_hi_index(rid)] = hi_val;
|
||||
|
||||
Lo::access_t lo_val = Lo::Present::bits(1) |
|
||||
Lo::Stage2_pointer::masked(phys_addr);
|
||||
_entries[_lo_index(rid)] = lo_val;
|
||||
|
||||
if (flush)
|
||||
Utils::clflush(&_entries[_lo_index(rid)]);
|
||||
}
|
||||
|
||||
void remove(Pci::rid_t rid, bool flush)
|
||||
{
|
||||
Lo::access_t val = lo(rid);
|
||||
Lo::Present::clear(val);
|
||||
_entries[_lo_index(rid)] = val;
|
||||
|
||||
if (flush)
|
||||
Utils::clflush(&_entries[_lo_index(rid)]);
|
||||
}
|
||||
|
||||
void generate(Xml_generator &, Env &, Intel::Report_helper &);
|
||||
|
||||
void flush_all()
|
||||
{
|
||||
for (Genode::size_t i=0; i < 512; i+=8)
|
||||
Utils::clflush(&_entries[i]);
|
||||
}
|
||||
|
||||
Context_table()
|
||||
{
|
||||
for (Genode::size_t i=0; i < 512; i++)
|
||||
_entries[i] = 0;
|
||||
}
|
||||
|
||||
} __attribute__((aligned(4096)));
|
||||
|
||||
|
||||
#endif /* _SRC__DRIVERS__PLATFORM__INTEL__CONTEXT_TABLE_H_ */
|
93
repos/pc/src/drivers/platform/pc/intel/domain_allocator.h
Normal file
93
repos/pc/src/drivers/platform/pc/intel/domain_allocator.h
Normal file
@ -0,0 +1,93 @@
|
||||
/*
|
||||
* \brief Helper for allocating domain IDs
|
||||
* \author Johannes Schlatow
|
||||
* \date 2023-08-31
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2023 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _SRC__DRIVERS__PLATFORM__INTEL__DOMAIN_ALLOCATOR_H_
|
||||
#define _SRC__DRIVERS__PLATFORM__INTEL__DOMAIN_ALLOCATOR_H_
|
||||
|
||||
/* Genode includes */
|
||||
#include <util/bit_allocator.h>
|
||||
#include <util/misc_math.h>
|
||||
|
||||
namespace Intel {
|
||||
using namespace Genode;
|
||||
|
||||
struct Domain_id;
|
||||
class Domain_allocator;
|
||||
|
||||
struct Out_of_domains { };
|
||||
}
|
||||
|
||||
|
||||
struct Intel::Domain_id
|
||||
{
|
||||
uint16_t value;
|
||||
|
||||
enum {
|
||||
INVALID = 0,
|
||||
MAX = (1 << 16)-1
|
||||
};
|
||||
|
||||
bool valid() {
|
||||
return value != INVALID; }
|
||||
|
||||
Domain_id() : value(INVALID) { }
|
||||
|
||||
Domain_id(size_t v)
|
||||
: value(static_cast<uint16_t>(min((size_t)MAX, v)))
|
||||
{
|
||||
if (v > MAX)
|
||||
warning("Clipping domain id: ", v, " -> ", value);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
/*
 * Bit-allocator-backed provider of unique domain ids in the range
 * [1, max_id]. Id 0 (Domain_id::INVALID) is never handed out; bit n of
 * the underlying allocator maps to domain id n+1.
 */
class Intel::Domain_allocator
{
	private:

		using Bit_allocator = Genode::Bit_allocator<Domain_id::MAX+1>;

		/* largest id this hardware unit supports (from its capabilities) */
		Domain_id     _max_id;
		Bit_allocator _allocator { };

	public:

		Domain_allocator(size_t max_id)
		: _max_id(max_id)
		{ }

		/**
		 * Allocate an unused domain id
		 *
		 * \throw Out_of_domains  if all ids up to _max_id are in use
		 */
		Domain_id alloc()
		{
			try {
				/* shift by one so that id 0 stays reserved as INVALID */
				addr_t new_id = _allocator.alloc() + 1;

				/* roll back if the allocator handed out an id beyond the
				 * hardware limit (Out_of_domains is not caught below) */
				if (new_id > _max_id.value) {
					_allocator.free(new_id - 1);
					throw Out_of_domains();
				}

				return Domain_id { new_id };
			} catch (typename Bit_allocator::Out_of_indices) { }

			throw Out_of_domains();
		}

		/* release 'domain' for reuse; invalid ids are ignored */
		void free(Domain_id domain)
		{
			if (domain.valid())
				_allocator.free(domain.value - 1);
		}

};
|
||||
|
||||
#endif /* _SRC__DRIVERS__PLATFORM__INTEL__DOMAIN_ALLOCATOR_H_ */
|
388
repos/pc/src/drivers/platform/pc/intel/io_mmu.cc
Normal file
388
repos/pc/src/drivers/platform/pc/intel/io_mmu.cc
Normal file
@ -0,0 +1,388 @@
|
||||
/*
|
||||
* \brief Intel IOMMU implementation
|
||||
* \author Johannes Schlatow
|
||||
* \date 2023-08-15
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2023 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
/* local includes */
|
||||
#include <intel/io_mmu.h>
|
||||
|
||||
using namespace Driver;
|
||||
|
||||
/* emit 'value' as hexadecimal attribute 'name' of the current XML node */
static void attribute_hex(Genode::Xml_generator & xml, char const * name,
                          unsigned long long value)
{
	Genode::String<32> const hex_string { Genode::Hex(value) };

	xml.attribute(name, hex_string);
}
|
||||
|
||||
|
||||
/**
 * Activate DMA remapping for the device denoted by 'bdf': install a
 * context entry that points to this domain's translation table.
 *
 * The Io_mem_dataspace_capability argument is unused by the Intel
 * implementation (the remapping hardware is configured by routing id).
 */
template <typename TABLE>
void Intel::Io_mmu::Domain<TABLE>::enable_pci_device(Io_mem_dataspace_capability const,
                                                     Pci::Bdf const & bdf)
{
	_intel_iommu.root_table().insert_context<TABLE::address_width()>(
		bdf, _translation_table_phys, _domain_id);

	/* invalidate translation caches only if failed requests are cached */
	if (_intel_iommu.caching_mode())
		_intel_iommu.invalidate_all(_domain_id, Pci::Bdf::rid(bdf));
	else
		_intel_iommu.flush_write_buffer();
}
|
||||
|
||||
|
||||
template <typename TABLE>
|
||||
void Intel::Io_mmu::Domain<TABLE>::disable_pci_device(Pci::Bdf const bdf)
|
||||
{
|
||||
_intel_iommu.root_table().remove_context(bdf, _translation_table_phys);
|
||||
|
||||
_intel_iommu.invalidate_all(_domain_id);
|
||||
}
|
||||
|
||||
|
||||
/**
 * Map the DMA range 'range' to physical address 'paddr' in this domain's
 * translation table.
 *
 * The Dataspace_capability argument is unused here; mappings are keyed
 * purely by address.
 */
template <typename TABLE>
void Intel::Io_mmu::Domain<TABLE>::add_range(Range const & range,
                                             addr_t const paddr,
                                             Dataspace_capability const)
{
	addr_t const vaddr { range.start };
	size_t const size  { range.size };

	/* DMA mappings: read/write, non-executable, cached RAM */
	Page_flags flags { RW, NO_EXEC, USER, NO_GLOBAL,
	                   RAM, Genode::CACHED };

	/* table walks must be flushed manually if the IOMMU is not coherent */
	_translation_table.insert_translation(vaddr, paddr, size, flags,
	                                      _table_allocator,
	                                      !_intel_iommu.coherent_page_walk(),
	                                      _intel_iommu.supported_page_sizes());

	/* batched updates (see Invalidation_guard) invalidate once at the end */
	if (_skip_invalidation)
		return;

	/* only invalidate iotlb if failed requests are cached */
	if (_intel_iommu.caching_mode())
		_intel_iommu.invalidate_iotlb(_domain_id, vaddr, size);
	else
		_intel_iommu.flush_write_buffer();
}
|
||||
|
||||
|
||||
template <typename TABLE>
|
||||
void Intel::Io_mmu::Domain<TABLE>::remove_range(Range const & range)
|
||||
{
|
||||
_translation_table.remove_translation(range.start, range.size,
|
||||
_table_allocator,
|
||||
!_intel_iommu.coherent_page_walk());
|
||||
|
||||
if (!_skip_invalidation)
|
||||
_intel_iommu.invalidate_iotlb(_domain_id, range.start, range.size);
|
||||
}
|
||||
|
||||
|
||||
/**
 * Flush the hardware's internal write buffer, but only if the unit
 * requires it (Capability.RWBF set).
 */
void Intel::Io_mmu::flush_write_buffer()
{
	if (!read<Capability::Rwbf>())
		return;

	/* the command register mirrors the status register's layout */
	Global_status::access_t  status = read<Global_status>();
	Global_command::access_t cmd    = status;

	/* keep status bits but clear one-shot bits */
	Global_command::Srtp::clear(cmd);
	Global_command::Sirtp::clear(cmd);

	Global_command::Wbf::set(cmd);
	write<Global_command>(cmd);

	/* wait until command completed, i.e. WBFS self-cleared again */
	while (read<Global_status>() != status);
}
|
||||
|
||||
|
||||
/**
|
||||
* Clear IOTLB.
|
||||
*
|
||||
* By default, we perform a global invalidation. When provided with a valid
|
||||
* Domain_id, a domain-specific invalidation is conducted. If provided with
|
||||
* a DMA address and size, a page-selective invalidation is performed.
|
||||
*
|
||||
* See Table 25 for required invalidation scopes.
|
||||
*/
|
||||
void Intel::Io_mmu::invalidate_iotlb(Domain_id domain_id, addr_t, size_t)
{
	/* NOTE(review): Cirg scope constants are reused for Iotlb::Iirg here;
	 * their encodings (GLOBAL/DOMAIN) appear to match — confirm against
	 * the VT-d spec. */
	unsigned requested_scope = Context_command::Cirg::GLOBAL;
	if (domain_id.valid())
		requested_scope = Context_command::Cirg::DOMAIN;

	/* wait for ongoing invalidation request to be completed */
	while (Iotlb::Invalidate::get(read_iotlb_reg()));

	/* invalidate IOTLB (also drain pending reads and writes, DR/DW) */
	write_iotlb_reg(Iotlb::Invalidate::bits(1) |
	                Iotlb::Iirg::bits(requested_scope) |
	                Iotlb::Dr::bits(1) | Iotlb::Dw::bits(1) |
	                Iotlb::Did::bits(domain_id.value));

	/* wait for completion */
	while (Iotlb::Invalidate::get(read_iotlb_reg()));

	/* check for errors: hardware reports the actually-applied scope */
	unsigned actual_scope = Iotlb::Iaig::get(read_iotlb_reg());
	if (!actual_scope)
		error("IOTLB invalidation failed (scope=", requested_scope, ")");
	else if (_verbose && actual_scope < requested_scope)
		warning("Performed IOTLB invalidation with different granularity ",
		        "(requested=", requested_scope, ", actual=", actual_scope, ")");

	/* XXX implement page-selective-within-domain IOTLB invalidation */
}
|
||||
|
||||
/**
|
||||
* Clear context cache and IOTLB.
|
||||
*
|
||||
* By default, we perform a global invalidation. When provided with a valid
|
||||
* Domain_id, a domain-specific invalidation is conducted. When a rid is
|
||||
* provided, a device-specific invalidation is done.
|
||||
*
|
||||
* See Table 25 for required invalidation scopes.
|
||||
*/
|
||||
void Intel::Io_mmu::invalidate_all(Domain_id domain_id, Pci::rid_t rid)
{
	/**
	 * We are using the register-based invalidation interface for the
	 * moment. This is only supported in legacy mode and for major
	 * architecture version 5 and lower (cf. 6.5).
	 */

	if (read<Version::Major>() > 5) {
		error("Unable to invalidate caches: Register-based invalidation only ",
		      "supported in architecture versions 5 and lower");
		return;
	}

	/* make sure that there is no context invalidation ongoing */
	while (read<Context_command::Invalidate>());

	/* narrow the scope as far as the arguments allow: global -> domain -> device */
	unsigned requested_scope = Context_command::Cirg::GLOBAL;
	if (domain_id.valid())
		requested_scope = Context_command::Cirg::DOMAIN;

	if (rid != 0)
		requested_scope = Context_command::Cirg::DEVICE;

	/* clear context cache */
	write<Context_command>(Context_command::Invalidate::bits(1) |
	                       Context_command::Cirg::bits(requested_scope) |
	                       Context_command::Sid::bits(rid) |
	                       Context_command::Did::bits(domain_id.value));


	/* wait for completion */
	while (read<Context_command::Invalidate>());

	/* check for errors: hardware reports the actually-applied scope */
	unsigned actual_scope = read<Context_command::Caig>();
	if (!actual_scope)
		error("Context-cache invalidation failed (scope=", requested_scope, ")");
	else if (_verbose && actual_scope < requested_scope)
		warning("Performed context-cache invalidation with different granularity ",
		        "(requested=", requested_scope, ", actual=", actual_scope, ")");

	/* XXX clear PASID cache if we ever switch from legacy mode translation */

	/* context-cache invalidation must always be followed by an IOTLB flush */
	invalidate_iotlb(domain_id, 0, 0);
}
|
||||
|
||||
|
||||
void Intel::Io_mmu::_handle_faults()
|
||||
{
|
||||
if (_fault_irq.constructed())
|
||||
_fault_irq->ack_irq();
|
||||
|
||||
if (read<Fault_status::Pending>()) {
|
||||
if (read<Fault_status::Overflow>())
|
||||
error("Fault recording overflow");
|
||||
|
||||
if (read<Fault_status::Iqe>())
|
||||
error("Invalidation queue error");
|
||||
|
||||
/* acknowledge all faults */
|
||||
write<Fault_status>(0x7d);
|
||||
|
||||
error("Faults records for ", name());
|
||||
unsigned num_registers = read<Capability::Nfr>() + 1;
|
||||
for (unsigned i = read<Fault_status::Fri>(); ; i = (i + 1) % num_registers) {
|
||||
Fault_record_hi::access_t hi = read_fault_record<Fault_record_hi>(i);
|
||||
|
||||
if (!Fault_record_hi::Fault::get(hi))
|
||||
break;
|
||||
|
||||
Fault_record_hi::access_t lo = read_fault_record<Fault_record_lo>(i);
|
||||
|
||||
error("Fault: hi=", Hex(hi),
|
||||
", reason=", Hex(Fault_record_hi::Reason::get(hi)),
|
||||
", type=", Hex(Fault_record_hi::Type::get(hi)),
|
||||
", AT=", Hex(Fault_record_hi::At::get(hi)),
|
||||
", EXE=", Hex(Fault_record_hi::Exe::get(hi)),
|
||||
", PRIV=", Hex(Fault_record_hi::Priv::get(hi)),
|
||||
", PP=", Hex(Fault_record_hi::Pp::get(hi)),
|
||||
", Source=", Hex(Fault_record_hi::Source::get(hi)),
|
||||
", info=", Hex(Fault_record_lo::Info::get(lo)));
|
||||
|
||||
|
||||
clear_fault_record(i);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
 * Generate the IOMMU state report: remapping status, raw registers and —
 * in verbose mode — the complete root/context/page-table hierarchy.
 */
void Intel::Io_mmu::generate(Xml_generator & xml)
{
	xml.node("intel", [&] () {
		xml.attribute("name", name());

		const bool enabled = (bool)read<Global_status::Enabled>();
		const bool rtps    = (bool)read<Global_status::Rtps>();
		const bool ires    = (bool)read<Global_status::Ires>();
		const bool irtps   = (bool)read<Global_status::Irtps>();
		const bool cfis    = (bool)read<Global_status::Cfis>();

		/* derived remapping capabilities of the current configuration */
		xml.attribute("dma_remapping", enabled && rtps);
		xml.attribute("msi_remapping", ires && irtps);
		xml.attribute("irq_remapping", ires && irtps && !cfis);

		/* dump registers */
		xml.attribute("version", String<16>(read<Version::Major>(), ".",
		                                    read<Version::Minor>()));

		xml.node("register", [&] () {
			xml.attribute("name", "Capability");
			attribute_hex(xml, "value", read<Capability>());
			xml.attribute("esrtps", (bool)read<Capability::Esrtps>());
			xml.attribute("esirtps", (bool)read<Capability::Esirtps>());
			xml.attribute("rwbf", (bool)read<Capability::Rwbf>());
			xml.attribute("nfr", read<Capability::Nfr>());
			xml.attribute("domains", read<Capability::Domains>());
			xml.attribute("caching", (bool)read<Capability::Caching_mode>());
		});

		xml.node("register", [&] () {
			xml.attribute("name", "Extended Capability");
			attribute_hex(xml, "value", read<Extended_capability>());
			xml.attribute("interrupt_remapping",
			              (bool)read<Extended_capability::Ir>());
			xml.attribute("page_walk_coherency",
			              (bool)read<Extended_capability::Page_walk_coherency>());
		});

		xml.node("register", [&] () {
			xml.attribute("name", "Global Status");
			attribute_hex(xml, "value", read<Global_status>());
			xml.attribute("qies", (bool)read<Global_status::Qies>());
			xml.attribute("ires", (bool)read<Global_status::Ires>());
			xml.attribute("rtps", (bool)read<Global_status::Rtps>());
			xml.attribute("irtps", (bool)read<Global_status::Irtps>());
			xml.attribute("cfis", (bool)read<Global_status::Cfis>());
			xml.attribute("enabled", (bool)read<Global_status::Enabled>());
		});

		/* fault registers and table dumps are verbose-only */
		if (!_verbose)
			return;

		xml.node("register", [&] () {
			xml.attribute("name", "Fault Status");
			attribute_hex(xml, "value", read<Fault_status>());
			attribute_hex(xml, "fri", read<Fault_status::Fri>());
			xml.attribute("iqe", (bool)read<Fault_status::Iqe>());
			xml.attribute("ppf", (bool)read<Fault_status::Pending>());
			xml.attribute("pfo", (bool)read<Fault_status::Overflow>());
		});

		xml.node("register", [&] () {
			xml.attribute("name", "Fault Event Control");
			attribute_hex(xml, "value", read<Fault_event_control>());
			xml.attribute("mask", (bool)read<Fault_event_control::Mask>());
		});

		/* without a valid root-table pointer there is nothing to dump */
		if (!read<Global_status::Rtps>())
			return;

		addr_t rt_addr = Root_table_address::Address::masked(read<Root_table_address>());

		xml.node("register", [&] () {
			xml.attribute("name", "Root Table Address");
			attribute_hex(xml, "value", rt_addr);
		});

		if (read<Root_table_address::Mode>() != Root_table_address::Mode::LEGACY) {
			error("Only supporting legacy translation mode");
			return;
		}

		/* dump root table, context table, and page tables */
		_report_helper.with_table<Root_table>(rt_addr,
			[&] (Root_table & root_table) {
				root_table.generate(xml, _env, _report_helper);
		});
	});
}
|
||||
|
||||
|
||||
/**
 * Constructor: map the hardware unit's register window, sanity-check its
 * capabilities, set up fault-event interrupts and install the root table.
 *
 * \param io_mmu_devices   registry this unit registers itself with
 * \param name             device name of the remapping unit
 * \param range            I/O memory range of the register window
 * \param table_allocator  allocator for root/context tables
 * \param irq_number       MSI number for fault events (0 = no fault IRQ)
 */
Intel::Io_mmu::Io_mmu(Env & env,
                      Io_mmu_devices & io_mmu_devices,
                      Device::Name const & name,
                      Device::Io_mem::Range range,
                      Context_table_allocator & table_allocator,
                      unsigned irq_number)
: Attached_mmio(env, range.start, range.size),
  Driver::Io_mmu(io_mmu_devices, name),
  _env(env),
  _managed_root_table(_env, table_allocator, *this, !coherent_page_walk()),
  _domain_allocator(_max_domains()-1)
{
	/* this driver only implements 3- and 4-level page tables */
	if (!read<Capability::Sagaw_4_level>() && !read<Capability::Sagaw_3_level>()) {
		error("IOMMU does not support 3- or 4-level page tables");
		return;
	}

	/* caches must be cleared if Esrtps is not set (see 6.6) */
	if (!read<Capability::Esrtps>())
		invalidate_all();
	else if (read<Global_status::Enabled>()) {
		/* with Esrtps, taking over an already-enabled unit is not handled */
		error("IOMMU already enabled");
		return;
	}

	/* enable fault event interrupts */
	if (irq_number) {
		_fault_irq.construct(_env, irq_number, 0, Irq_session::TYPE_MSI);

		_fault_irq->sigh(_fault_handler);
		_fault_irq->ack_irq();

		/* program the MSI address/data pair reported by the IRQ session */
		Irq_session::Info info = _fault_irq->info();
		if (info.type == Irq_session::Info::INVALID)
			error("Unable to enable fault event interrupts for ", name);
		else {
			write<Fault_event_address>((Fault_event_address::access_t)info.address);
			write<Fault_event_data>((Fault_event_data::access_t)info.value);
			write<Fault_event_control::Mask>(0);
		}
	}

	/* set root table address */
	write<Root_table_address>(
		Root_table_address::Address::masked(_managed_root_table.phys_addr()));

	/* issue set root table pointer command*/
	_global_command<Global_command::Srtp>(1);
}
|
603
repos/pc/src/drivers/platform/pc/intel/io_mmu.h
Normal file
603
repos/pc/src/drivers/platform/pc/intel/io_mmu.h
Normal file
@ -0,0 +1,603 @@
|
||||
/*
|
||||
* \brief Intel IOMMU implementation
|
||||
* \author Johannes Schlatow
|
||||
* \date 2023-08-15
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2023 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _SRC__DRIVERS__PLATFORM__INTEL__IO_MMU_H_
|
||||
#define _SRC__DRIVERS__PLATFORM__INTEL__IO_MMU_H_
|
||||
|
||||
/* Genode includes */
|
||||
#include <os/attached_mmio.h>
|
||||
#include <util/register_set.h>
|
||||
#include <base/allocator.h>
|
||||
#include <base/attached_ram_dataspace.h>
|
||||
|
||||
/* Platform-driver includes */
|
||||
#include <device.h>
|
||||
#include <io_mmu.h>
|
||||
#include <dma_allocator.h>
|
||||
|
||||
/* local includes */
|
||||
#include <intel/managed_root_table.h>
|
||||
#include <intel/report_helper.h>
|
||||
#include <intel/page_table.h>
|
||||
#include <intel/domain_allocator.h>
|
||||
#include <expanding_page_table_allocator.h>
|
||||
|
||||
namespace Intel {
|
||||
using namespace Genode;
|
||||
using namespace Driver;
|
||||
|
||||
using Context_table_allocator = Managed_root_table::Allocator;
|
||||
|
||||
class Io_mmu;
|
||||
class Io_mmu_factory;
|
||||
}
|
||||
|
||||
|
||||
class Intel::Io_mmu : private Attached_mmio,
|
||||
public Driver::Io_mmu,
|
||||
private Translation_table_registry
|
||||
{
|
||||
public:
|
||||
|
||||
/* Use derived domain class to store reference to buffer registry */
|
||||
template <typename TABLE>
|
||||
class Domain : public Driver::Io_mmu::Domain,
|
||||
public Registered_translation_table
|
||||
{
|
||||
private:
|
||||
|
||||
using Table_allocator = Expanding_page_table_allocator<4096>;
|
||||
|
||||
Intel::Io_mmu & _intel_iommu;
|
||||
Env & _env;
|
||||
Ram_allocator & _ram_alloc;
|
||||
Registry<Dma_buffer> const & _buffer_registry;
|
||||
|
||||
Table_allocator _table_allocator;
|
||||
Domain_allocator & _domain_allocator;
|
||||
Domain_id _domain_id { _domain_allocator.alloc() };
|
||||
bool _skip_invalidation { false };
|
||||
|
||||
addr_t _translation_table_phys {
|
||||
_table_allocator.construct<TABLE>() };
|
||||
|
||||
TABLE & _translation_table {
|
||||
*(TABLE*)virt_addr(_translation_table_phys) };
|
||||
|
||||
struct Invalidation_guard
|
||||
{
|
||||
Domain<TABLE> & _domain;
|
||||
bool _requires_invalidation;
|
||||
|
||||
Invalidation_guard(Domain<TABLE> & domain, bool required=true)
|
||||
: _domain(domain),
|
||||
_requires_invalidation(required)
|
||||
{
|
||||
_domain._skip_invalidation = true;
|
||||
}
|
||||
|
||||
~Invalidation_guard()
|
||||
{
|
||||
_domain._skip_invalidation = false;
|
||||
|
||||
if (_requires_invalidation)
|
||||
_domain._intel_iommu.invalidate_all(_domain._domain_id);
|
||||
else
|
||||
_domain._intel_iommu.flush_write_buffer();
|
||||
}
|
||||
};
|
||||
|
||||
friend struct Invalidation_guard;
|
||||
|
||||
public:
|
||||
|
||||
void enable_pci_device(Io_mem_dataspace_capability const,
|
||||
Pci::Bdf const &) override;
|
||||
void disable_pci_device(Pci::Bdf const &) override;
|
||||
|
||||
void add_range(Range const &,
|
||||
addr_t const,
|
||||
Dataspace_capability const) override;
|
||||
void remove_range(Range const &) override;
|
||||
|
||||
/* Registered_translation_table interface */
|
||||
addr_t virt_addr(addr_t phys_addr) override
|
||||
{
|
||||
addr_t va { 0 };
|
||||
|
||||
_table_allocator.with_table<TABLE>(phys_addr,
|
||||
[&] (TABLE & t) { va = (addr_t)&t; },
|
||||
[&] () { va = 0; });
|
||||
|
||||
return va;
|
||||
}
|
||||
|
||||
Domain(Intel::Io_mmu & intel_iommu,
|
||||
Allocator & md_alloc,
|
||||
Registry<Dma_buffer> const & buffer_registry,
|
||||
Env & env,
|
||||
Ram_allocator & ram_alloc,
|
||||
Domain_allocator & domain_allocator)
|
||||
: Driver::Io_mmu::Domain(intel_iommu, md_alloc),
|
||||
Registered_translation_table(intel_iommu),
|
||||
_intel_iommu(intel_iommu),
|
||||
_env(env),
|
||||
_ram_alloc(ram_alloc),
|
||||
_buffer_registry(buffer_registry),
|
||||
_table_allocator(_env, md_alloc, ram_alloc, 2),
|
||||
_domain_allocator(domain_allocator)
|
||||
{
|
||||
Invalidation_guard guard { *this, _intel_iommu.caching_mode() };
|
||||
|
||||
_buffer_registry.for_each([&] (Dma_buffer const & buf) {
|
||||
add_range({ buf.dma_addr, buf.size }, buf.phys_addr, buf.cap); });
|
||||
}
|
||||
|
||||
~Domain() override
|
||||
{
|
||||
{
|
||||
Invalidation_guard guard { *this };
|
||||
|
||||
_intel_iommu.root_table().remove_context(_translation_table_phys);
|
||||
|
||||
_buffer_registry.for_each([&] (Dma_buffer const & buf) {
|
||||
remove_range({ buf.dma_addr, buf.size });
|
||||
});
|
||||
|
||||
_table_allocator.destruct<TABLE>(
|
||||
_translation_table_phys);
|
||||
}
|
||||
|
||||
_domain_allocator.free(_domain_id);
|
||||
}
|
||||
};
|
||||
|
||||
private:
|
||||
|
||||
Env & _env;
|
||||
|
||||
/**
|
||||
* For a start, we keep a distinct root table for every hardware unit.
|
||||
*
|
||||
* This doubles RAM requirements for allocating page tables when devices
|
||||
* in the scope of different hardware units are used in the same session,
|
||||
* yet simplifies the implementation. In order to use a single root table
|
||||
* for all hardware units, we'd need to have a single Io_mmu object
|
||||
* controlling all hardware units. Otherwise, the session component will
|
||||
* create separate Domain objects that receive identical modification
|
||||
* instructions.
|
||||
*/
|
||||
bool _verbose { false };
|
||||
Managed_root_table _managed_root_table;
|
||||
Report_helper _report_helper { *this };
|
||||
Domain_allocator _domain_allocator;
|
||||
Constructible<Irq_connection> _fault_irq { };
|
||||
Signal_handler<Io_mmu> _fault_handler {
|
||||
_env.ep(), *this, &Io_mmu::_handle_faults };
|
||||
|
||||
/**
|
||||
* Registers
|
||||
*/
|
||||
|
||||
struct Version : Register<0x0, 32>
|
||||
{
|
||||
struct Minor : Bitfield<0, 4> { };
|
||||
struct Major : Bitfield<4, 4> { };
|
||||
};
|
||||
|
||||
struct Capability : Register<0x8, 64>
|
||||
{
|
||||
/* enhanced set root table pointer support */
|
||||
struct Esrtps : Bitfield<63,1> { };
|
||||
|
||||
/* enhanced set irq table pointer support */
|
||||
struct Esirtps : Bitfield<62,1> { };
|
||||
|
||||
/* number of fault-recording registers (n-1) */
|
||||
struct Nfr : Bitfield<40,8> { };
|
||||
|
||||
struct Page_1GB : Bitfield<35,1> { };
|
||||
struct Page_2MB : Bitfield<34,1> { };
|
||||
|
||||
/* fault recording register offset */
|
||||
struct Fro : Bitfield<24,10> { };
|
||||
|
||||
struct Sagaw_5_level : Bitfield<11,1> { };
|
||||
struct Sagaw_4_level : Bitfield<10,1> { };
|
||||
struct Sagaw_3_level : Bitfield< 9,1> { };
|
||||
|
||||
struct Caching_mode : Bitfield<7,1> { };
|
||||
|
||||
struct Rwbf : Bitfield<4,1> { };
|
||||
|
||||
struct Domains : Bitfield<0,3> { };
|
||||
|
||||
};
|
||||
|
||||
/*
 * Extended-capability register (VT-d ECAP_REG)
 */
struct Extended_capability : Register<0x10, 64>
{
	/* IOTLB register offset (counted in 128-bit registers, see _offset()) */
	struct Iro : Bitfield<8,10> { };

	/* interrupt remapping support */
	struct Ir : Bitfield<3,1> { };

	/* page-walk coherency (see coherent_page_walk()) */
	struct Page_walk_coherency : Bitfield<0,1> { };
};


/*
 * Global-command register; hosts one-shot command bits
 */
struct Global_command : Register<0x18, 32>
{
	/* enable/disable DMA remapping */
	struct Enable : Bitfield<31,1> { };

	/* set root table pointer */
	struct Srtp : Bitfield<30,1> { };

	/* write-buffer flush */
	struct Wbf : Bitfield<27,1> { };

	/* set interrupt remap table pointer */
	struct Sirtp : Bitfield<24,1> { };
};


/*
 * Global-status register; reflects completion of global commands
 */
struct Global_status : Register<0x1c, 32>
{
	/* DMA remapping enabled */
	struct Enabled : Bitfield<31,1> { };

	/* root table pointer status */
	struct Rtps : Bitfield<30,1> { };

	/* write-buffer flush status */
	struct Wbfs : Bitfield<27,1> { };

	/* queued invalidation enable status */
	struct Qies : Bitfield<26,1> { };

	/* interrupt remapping enable status */
	struct Ires : Bitfield<25,1> { };

	/* interrupt remapping table pointer status */
	struct Irtps : Bitfield<24,1> { };

	/* compatibility format interrupts */
	struct Cfis : Bitfield<23,1> { };
};


/*
 * Root-table address register; physical root-table pointer and table mode
 */
struct Root_table_address : Register<0x20, 64>
{
	struct Mode    : Bitfield<10, 2> { enum { LEGACY = 0x00 }; };
	struct Address : Bitfield<12,52> { };
};


/*
 * Context-command register; triggers context-cache invalidation
 */
struct Context_command : Register<0x28, 64>
{
	struct Invalidate : Bitfield<63,1> { };

	/* invalidation request granularity */
	struct Cirg : Bitfield<61,2>
	{
		enum {
			GLOBAL = 0x1,
			DOMAIN = 0x2,
			DEVICE = 0x3
		};
	};

	/* actual invalidation granularity */
	struct Caig : Bitfield<59,2> { };

	/* source id */
	struct Sid : Bitfield<16,16> { };

	/* domain id */
	struct Did : Bitfield<0,16> { };
};


/*
 * Fault-status register
 */
struct Fault_status : Register<0x34, 32>
{
	/* fault record index */
	struct Fri : Bitfield<8,8> { };

	/* invalidation queue error */
	struct Iqe : Bitfield<4,1> { };

	/* primary pending fault */
	struct Pending : Bitfield<1,1> { };

	/* primary fault overflow */
	struct Overflow : Bitfield<0,1> { };
};


/* fault-event interrupt control; Mask=1 suppresses fault interrupts */
struct Fault_event_control : Register<0x38, 32>
{
	struct Mask : Bitfield<31,1> { };
};

struct Fault_event_data : Register<0x3c, 32>
{ };

struct Fault_event_address : Register<0x40, 32>
{ };

/*
 * Generic 64-bit register array over the MMIO window; used for registers
 * whose position is only known at runtime (IOTLB and fault-record
 * registers located via Iro/Fro).
 *
 * IOTLB registers may be at offsets 0 to 1024*16
 *
 * NOTE(review): 256 x 64-bit entries cover only 2K of that range —
 * confirm the Iro/Fro offsets of targeted hardware fit within the array.
 */
struct All_registers : Register_array<0x0, 64, 256, 64>
{ };
|
||||
|
||||
/*
 * Upper 64 bit of a 128-bit fault record (located via Capability::Fro)
 */
struct Fault_record_hi : Genode::Register<64>
{
	/* 64-bit-register offset within the 128-bit record */
	static unsigned offset() { return 1; }

	/* record holds a valid fault; cleared by writing 1 (see clear_fault_record) */
	struct Fault : Bitfield<63,1> { };
	struct Type1 : Bitfield<62,1> { };

	/* address type */
	struct At : Bitfield<60,2> { };

	struct Pasid  : Bitfield<40,10> { };
	struct Reason : Bitfield<32, 8> { };

	/* PASID present */
	struct Pp : Bitfield<31,1> { };

	/* execute permission requested */
	struct Exe : Bitfield<30,1> { };

	/* privilege mode requested */
	struct Priv : Bitfield<29,1> { };
	struct Type2 : Bitfield<28,1> { };

	/* source id of the faulting request */
	struct Source : Bitfield<0,16> { };

	/* request type, assembled from the two non-adjacent type bits */
	struct Type : Bitset_2<Type1, Type2>
	{
		enum {
			WRITE_REQUEST  = 0x0,
			READ_REQUEST   = 0x1,
			PAGE_REQUEST   = 0x2,
			ATOMIC_REQUEST = 0x3
		};
	};
};


/*
 * Lower 64 bit of a 128-bit fault record
 */
struct Fault_record_lo : Genode::Register<64>
{
	/* 64-bit-register offset within the 128-bit record */
	static unsigned offset() { return 0; }

	/* fault information (bits 12..63) — presumably the faulting address;
	 * confirm against the VT-d fault-record layout */
	struct Info : Bitfield<12,52> { };
};


/*
 * IOTLB invalidation register (located via Extended_capability::Iro)
 */
struct Iotlb : Genode::Register<64>
{
	struct Invalidate : Bitfield<63,1> { };

	/* IOTLB invalidation request granularity */
	struct Iirg : Bitfield<60,2>
	{
		enum {
			GLOBAL = 0x1,
			DOMAIN = 0x2,
			DEVICE = 0x3
		};
	};

	/* IOTLB actual invalidation granularity */
	struct Iaig : Bitfield<57,2> { };

	/* drain reads/writes */
	struct Dr : Bitfield<49,1> { };
	struct Dw : Bitfield<48,1> { };

	/* domain id */
	struct Did : Bitfield<32,16> { };

};
|
||||
|
||||
/* number of domain ids supported by the hardware: 2^(4 + 2*Domains) */
uint32_t _max_domains() {
	return 1 << (4 + read<Capability::Domains>()*2); }

/**
 * Set or clear bit BIT in the global-command register and wait for
 * completion
 *
 * The command value is derived from the current global status so that
 * already-enabled features are preserved; completion is detected by the
 * hardware mirroring the requested bit in the status register.
 */
template <typename BIT>
void _global_command(bool set)
{
	Global_status::access_t status = read<Global_status>();
	Global_command::access_t cmd = status;

	/* keep status bits but clear one-shot bits */
	Global_command::Srtp::clear(cmd);
	Global_command::Sirtp::clear(cmd);

	if (set) {
		BIT::set(cmd);
		BIT::set(status);
	} else {
		BIT::clear(cmd);
		BIT::clear(status);
	}

	/* write command */
	write<Global_command>(cmd);

	/* wait until command completed (busy polling) */
	while (read<Global_status>() != status);
}

/**
 * Convert a register-offset bitfield value into a 64-bit register offset
 */
template <typename BITFIELD>
unsigned long _offset()
{
	/* BITFIELD denotes registers offset counting 128-bit as one register */
	unsigned offset = read<BITFIELD>();

	/* return 64-bit register offset */
	return offset*2;
}

/* write 64-bit register at a runtime-determined offset */
template <typename OFFSET_BITFIELD>
void write_offset_register(unsigned index, All_registers::access_t value) {
	write<All_registers>(value, _offset<OFFSET_BITFIELD>() + index); }

/* read 64-bit register at a runtime-determined offset */
template <typename OFFSET_BITFIELD>
All_registers::access_t read_offset_register(unsigned index) {
	return read<All_registers>(_offset<OFFSET_BITFIELD>() + index); }

/* the IOTLB register is the second 64-bit register of the IOTLB set */
void write_iotlb_reg(Iotlb::access_t v) {
	write_offset_register<Extended_capability::Iro>(1, v); }

Iotlb::access_t read_iotlb_reg() {
	return read_offset_register<Extended_capability::Iro>(1); }

/* read the half (selected by REG::offset()) of 128-bit fault record 'index' */
template <typename REG>
REG::access_t read_fault_record(unsigned index) {
	return read_offset_register<Capability::Fro>(index*2 + REG::offset()); }

/* clear fault record 'index' by writing 1 to its fault bit */
void clear_fault_record(unsigned index) {
	write_offset_register<Capability::Fro>(index*2 + Fault_record_hi::offset(),
	                                       Fault_record_hi::Fault::bits(1));
}
|
||||
|
||||
/* interrupt handler for primary fault reporting */
void _handle_faults();

/**
 * Io_mmu interface
 */

/* turn on DMA remapping */
void _enable() override {
	_global_command<Global_command::Enable>(1);

	if (_verbose)
		log("enabled IOMMU ", name());
}

/* turn off DMA remapping */
void _disable() override
{
	_global_command<Global_command::Enable>(0);

	if (_verbose)
		log("disabled IOMMU ", name());
}

/*
 * Bitmask of supported page sizes: 4K always, 2M/1G as advertised by
 * the capability register (bit n set means 2^n-sized pages work).
 */
const uint32_t _supported_page_sizes {
	read<Capability::Page_1GB>() << 30 |
	read<Capability::Page_2MB>() << 21 | 1u << 12 };

public:

	Managed_root_table & root_table() { return _managed_root_table; }

	/* report the IOMMU state as XML */
	void generate(Xml_generator &) override;

	/* invalidate IOTLB entries of the given domain/address range */
	void invalidate_iotlb(Domain_id, addr_t, size_t);

	/* invalidate cached translations, optionally narrowed by domain/rid */
	void invalidate_all(Domain_id domain = Domain_id { Domain_id::INVALID }, Pci::rid_t = 0);

	/* hardware page walks snoop CPU caches if set */
	bool coherent_page_walk() const { return read<Extended_capability::Page_walk_coherency>(); }
	bool caching_mode()       const { return read<Capability::Caching_mode>(); }

	uint32_t supported_page_sizes() const { return _supported_page_sizes; }

	void flush_write_buffer();
|
||||
|
||||
|
||||
/**
 * Io_mmu interface
 */

/**
 * Create a DMA-remapping domain for this IOMMU
 *
 * Selects the translation-table depth from the supported adjusted
 * guest-address widths (SAGAW): a 4-level table if supported, a 3-level
 * table otherwise.
 *
 * NOTE(review): if the hardware supports neither 3- nor 4-level tables
 * but only 5-level ones, an error is logged yet a 3-level domain is
 * still returned — such a domain cannot be installed on that hardware;
 * confirm this fallback is intentional.
 */
Driver::Io_mmu::Domain & create_domain(Allocator     & md_alloc,
                                       Ram_allocator & ram_alloc,
                                       Registry<Dma_buffer> const & buffer_registry,
                                       Ram_quota_guard &,
                                       Cap_quota_guard &) override
{
	if (read<Capability::Sagaw_4_level>())
		return *new (md_alloc)
			Intel::Io_mmu::Domain<Level_4_translation_table>(*this,
			                                                 md_alloc,
			                                                 buffer_registry,
			                                                 _env,
			                                                 ram_alloc,
			                                                 _domain_allocator);

	if (!read<Capability::Sagaw_3_level>() && read<Capability::Sagaw_5_level>())
		error("IOMMU requires 5-level translation tables (not implemented)");

	return *new (md_alloc)
		Intel::Io_mmu::Domain<Level_3_translation_table>(*this,
		                                                 md_alloc,
		                                                 buffer_registry,
		                                                 _env,
		                                                 ram_alloc,
		                                                 _domain_allocator);
}
|
||||
|
||||
/**
 * Constructor/Destructor
 */

Io_mmu(Env                     & env,
       Io_mmu_devices          & io_mmu_devices,
       Device::Name      const & name,
       Device::Io_mem::Range     range,
       Context_table_allocator & table_allocator,
       unsigned                  irq_number);

/* tear down all remaining domains before the device object vanishes */
~Io_mmu() { _destroy_domains(); }
};
|
||||
|
||||
|
||||
class Intel::Io_mmu_factory : public Driver::Io_mmu_factory
{
	private:

		/* allocator array backing the root table and the context tables */
		using Table_array = Context_table_allocator::Array<510>;

		Genode::Env & _env;

		/*
		 * Allocate 2MB RAM for root table and 256 context tables
		 *
		 * NOTE(review): the array is dimensioned for 510 tables while
		 * the comment above says 256 — the remainder of the 2MB also
		 * holds the array's bookkeeping; confirm the intended count.
		 */
		Attached_ram_dataspace _allocator_ds { _env.ram(),
		                                       _env.rm(),
		                                       2*1024*1024,
		                                       Cache::CACHED };

		/* add page-table allocator array at _allocator_ds.local_addr() */
		Table_array & _table_array { *Genode::construct_at<Table_array>(
			_allocator_ds.local_addr<void>(),

			/* provide the DMA (physical) address of the dataspace */
			[&] (void *) {
				return _env.pd().dma_addr(_allocator_ds.cap());
			})};

		/* We use a single allocator for context tables for all IOMMU devices */
		Context_table_allocator & _table_allocator { _table_array.alloc() };

	public:

		Io_mmu_factory(Genode::Env & env, Registry<Driver::Io_mmu_factory> & registry)
		: Driver::Io_mmu_factory(registry, Device::Type { "intel_iommu" }),
		  _env(env)
		{ }

		/**
		 * Instantiate an Intel IOMMU object for a matching device
		 *
		 * Uses the device's first IRQ and first I/O-memory range.
		 */
		void create(Allocator & alloc, Io_mmu_devices & io_mmu_devices, Device const & device) override
		{
			using Range = Device::Io_mem::Range;

			unsigned irq_number { 0 };
			device.for_each_irq([&] (unsigned idx, unsigned nbr, Irq_session::Type,
			                         Irq_session::Polarity, Irq_session::Trigger, bool)
			{
				/* only the first IRQ is of interest */
				if (idx == 0)
					irq_number = nbr;
			});

			device.for_each_io_mem([&] (unsigned idx, Range range, Device::Pci_bar, bool)
			{
				if (idx == 0)
					new (alloc) Intel::Io_mmu(_env, io_mmu_devices, device.name(),
					                          range, _table_allocator, irq_number);
			});
		}
};
|
||||
|
||||
#endif /* _SRC__DRIVERS__PLATFORM__INTEL__IO_MMU_H_ */
|
66
repos/pc/src/drivers/platform/pc/intel/managed_root_table.cc
Normal file
66
repos/pc/src/drivers/platform/pc/intel/managed_root_table.cc
Normal file
@ -0,0 +1,66 @@
|
||||
/*
|
||||
* \brief Allocation and configuration helper for root and context tables
|
||||
* \author Johannes Schlatow
|
||||
* \date 2023-08-31
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2023 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
/* local includes */
|
||||
#include <intel/managed_root_table.h>
|
||||
|
||||
/**
 * Remove the second-stage table 'phys_addr' assigned to device 'bdf'
 *
 * Logs an error if the device's context entry references a different
 * translation table than the one being removed.
 */
void Intel::Managed_root_table::remove_context(Pci::Bdf bdf,
                                               addr_t   phys_addr)
{
	Pci::rid_t const device_rid = Pci::Bdf::rid(bdf);

	_with_context_table(bdf.bus, [&] (Context_table & context) {

		/* sanity check: the entry must point at the table we remove */
		if (context.stage2_pointer(device_rid) != phys_addr)
			error("Trying to remove foreign translation table for ", bdf);

		context.remove(device_rid, _force_flush);
	});
}
|
||||
|
||||
|
||||
/**
 * Remove the second-stage table 'phys_addr' for every device that
 * currently references it
 *
 * Scans all buses and all routing ids of each present context table.
 */
void Intel::Managed_root_table::remove_context(addr_t phys_addr)
{
	Root_table::for_each([&] (uint8_t bus) {
		_with_context_table(bus, [&] (Context_table & context) {
			Context_table::for_each(0, [&] (Pci::rid_t id) {

				/* skip absent entries and entries of other tables */
				if (!context.present(id) || context.stage2_pointer(id) != phys_addr)
					return;

				Pci::Bdf const bdf { (Pci::bus_t) bus,
				                     (Pci::dev_t) Pci::Bdf::Routing_id::Device::get(id),
				                     (Pci::func_t)Pci::Bdf::Routing_id::Function::get(id) };
				remove_context(bdf, phys_addr);
			});
		});
	});
}
|
||||
|
||||
|
||||
/**
 * Destructor
 *
 * Releases all context tables still referenced by the root table, then
 * the root table itself.
 *
 * Fix: the original destructed the root table through the allocator
 * first and dereferenced it *afterwards* via 'with_table()' to find the
 * context tables — relying on a freed table still being resolvable
 * (an ordering hazard / use-after-free). Destruct dependents first.
 */
Intel::Managed_root_table::~Managed_root_table()
{
	/* destruct context tables while the root table is still valid */
	_table_allocator.with_table<Root_table>(_root_table_phys,
		[&] (Root_table & root_table) {
			Root_table::for_each([&] (uint8_t bus) {
				if (root_table.present(bus)) {
					addr_t phys_addr = root_table.address(bus);
					_table_allocator.destruct<Context_table>(phys_addr);
				}
			});
		}, [&] () {});

	/* destruct root table last */
	_table_allocator.destruct<Root_table>(_root_table_phys);
}
|
133
repos/pc/src/drivers/platform/pc/intel/managed_root_table.h
Normal file
133
repos/pc/src/drivers/platform/pc/intel/managed_root_table.h
Normal file
@ -0,0 +1,133 @@
|
||||
/*
|
||||
* \brief Allocation and configuration helper for root and context tables
|
||||
* \author Johannes Schlatow
|
||||
* \date 2023-08-31
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2023 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _SRC__DRIVERS__PLATFORM__INTEL__MANAGED_ROOT_TABLE_H_
|
||||
#define _SRC__DRIVERS__PLATFORM__INTEL__MANAGED_ROOT_TABLE_H_
|
||||
|
||||
/* Genode includes */
|
||||
#include <base/env.h>
|
||||
#include <base/attached_ram_dataspace.h>
|
||||
|
||||
/* local includes */
|
||||
#include <intel/root_table.h>
|
||||
#include <intel/context_table.h>
|
||||
#include <intel/domain_allocator.h>
|
||||
#include <intel/report_helper.h>
|
||||
#include <hw/page_table_allocator.h>
|
||||
|
||||
namespace Intel {
|
||||
using namespace Genode;
|
||||
|
||||
class Managed_root_table;
|
||||
}
|
||||
|
||||
|
||||
class Intel::Managed_root_table : public Registered_translation_table
{
	public:

		using Allocator = Hw::Page_table_allocator<4096>;

	private:

		Env & _env;

		Allocator & _table_allocator;

		/* physical address of the root table owned by this object */
		addr_t _root_table_phys { _table_allocator.construct<Root_table>() };

		/* flush cache lines after table updates (for non-coherent hardware) */
		bool _force_flush;

		/**
		 * Apply 'fn' to the context table of 'bus'
		 *
		 * If 'create' is set, a missing context table is allocated on
		 * the fly; otherwise 'fn' is not called for absent tables.
		 */
		template <typename FN>
		void _with_context_table(uint8_t bus, FN && fn, bool create = false)
		{
			auto no_match_fn = [&] () { };

			_table_allocator.with_table<Root_table>(_root_table_phys,
				[&] (Root_table & root_table) {

					/* allocate table if not present */
					bool new_table { false };
					if (!root_table.present(bus)) {
						if (!create) return;

						root_table.address(bus,
						                   _table_allocator.construct<Context_table>(),
						                   _force_flush);
						new_table = true;
					}

					_table_allocator.with_table<Context_table>(root_table.address(bus),
						[&] (Context_table & ctx) {
							/* a fresh table must hit memory before first use */
							if (_force_flush && new_table)
								ctx.flush_all();

							fn(ctx);
						}, no_match_fn);

				}, no_match_fn);
		}

	public:

		addr_t phys_addr() { return _root_table_phys; }

		/**
		 * Add second-stage table for device 'bdf'
		 *
		 * \return  domain id previously assigned to the device, or a
		 *          default-constructed id if none was present
		 */
		template <unsigned ADDRESS_WIDTH>
		Domain_id insert_context(Pci::Bdf bdf, addr_t phys_addr, Domain_id domain)
		{
			Domain_id cur_domain { };

			_with_context_table(bdf.bus, [&] (Context_table & ctx) {
				Pci::rid_t rid = Pci::Bdf::rid(bdf);

				if (ctx.present(rid))
					cur_domain = Domain_id(ctx.domain(rid));

				ctx.insert<ADDRESS_WIDTH>(rid, phys_addr, domain.value, _force_flush);
			}, true);

			return cur_domain;
		}

		/* remove second-stage table for particular device */
		void remove_context(Pci::Bdf, addr_t);

		/* remove second-stage table for all devices */
		void remove_context(addr_t);

		/*
		 * Registered_translation_table interface
		 *
		 * Resolves 'pa' to a local virtual address if it denotes one of
		 * this root table's context tables, returns 0 otherwise.
		 */
		addr_t virt_addr(addr_t pa) override
		{
			addr_t va { 0 };
			_table_allocator.with_table<Context_table>(pa,
				[&] (Context_table & table) { va = (addr_t)&table; },
				[&] () { va = 0; });

			return va;
		}

		Managed_root_table(Env                        & env,
		                   Allocator                  & table_allocator,
		                   Translation_table_registry & registry,
		                   bool                         force_flush)
		: Registered_translation_table(registry),
		  _env(env),
		  _table_allocator(table_allocator),
		  _force_flush(force_flush)
		{ }

		~Managed_root_table();
};
|
||||
|
||||
#endif /* _SRC__DRIVERS__PLATFORM__INTEL__MANAGED_ROOT_TABLE_H_ */
|
62
repos/pc/src/drivers/platform/pc/intel/page_table.cc
Normal file
62
repos/pc/src/drivers/platform/pc/intel/page_table.cc
Normal file
@ -0,0 +1,62 @@
|
||||
/*
|
||||
* \brief x86_64 DMAR page table definitions
|
||||
* \author Johannes Schlatow
|
||||
* \date 2023-11-06
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2023 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#include <intel/page_table.h>
|
||||
|
||||
void Intel::Level_1_translation_table::generate(
|
||||
Genode::Xml_generator & xml,
|
||||
Genode::Env &,
|
||||
Report_helper &)
|
||||
{
|
||||
for_each_entry([&] (unsigned long i, Descriptor::access_t e) {
|
||||
Descriptor::generate_page(i, e, xml); });
|
||||
}
|
||||
|
||||
|
||||
void Intel::Level_2_translation_table::generate(
|
||||
Genode::Xml_generator & xml,
|
||||
Genode::Env & env,
|
||||
Report_helper & report_helper)
|
||||
{
|
||||
for_each_entry([&] (unsigned long i, Descriptor::access_t e) {
|
||||
if (Descriptor::maps_page(e))
|
||||
Descriptor::Page::generate_page(i, e, xml);
|
||||
else
|
||||
Descriptor::Table::generate<Entry>(i, e, xml, env, report_helper);
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
void Intel::Level_3_translation_table::generate(
|
||||
Genode::Xml_generator & xml,
|
||||
Genode::Env & env,
|
||||
Report_helper & report_helper)
|
||||
{
|
||||
for_each_entry([&] (unsigned long i, Descriptor::access_t e) {
|
||||
if (Descriptor::maps_page(e))
|
||||
Descriptor::Page::generate_page(i, e, xml);
|
||||
else
|
||||
Descriptor::Table::generate<Entry>(i, e, xml, env, report_helper);
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
void Intel::Level_4_translation_table::generate(
|
||||
Genode::Xml_generator & xml,
|
||||
Genode::Env & env,
|
||||
Report_helper & report_helper)
|
||||
{
|
||||
for_each_entry([&] (unsigned long i, Descriptor::access_t e) {
|
||||
Descriptor::generate<Entry>(i, e, xml, env, report_helper);
|
||||
});
|
||||
}
|
301
repos/pc/src/drivers/platform/pc/intel/page_table.h
Normal file
301
repos/pc/src/drivers/platform/pc/intel/page_table.h
Normal file
@ -0,0 +1,301 @@
|
||||
/*
|
||||
* \brief x86_64 DMAR page table definitions
|
||||
* \author Johannes Schlatow
|
||||
* \date 2023-11-06
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2023 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _SRC__DRIVERS__PLATFORM__PC__INTEL__PAGE_TABLE_H_
|
||||
#define _SRC__DRIVERS__PLATFORM__PC__INTEL__PAGE_TABLE_H_
|
||||
|
||||
#include <util/register.h>
|
||||
#include <util/xml_generator.h>
|
||||
|
||||
#include <page_table_base.h>
|
||||
#include <intel/report_helper.h>
|
||||
|
||||
namespace Intel {
|
||||
|
||||
/**
|
||||
* Common descriptor.
|
||||
*
|
||||
* Table entry containing descriptor fields common to all levels.
|
||||
*/
|
||||
struct Common_descriptor : Genode::Register<64>
{
	struct R : Bitfield<0, 1> { };   /* read */
	struct W : Bitfield<1, 1> { };   /* write */
	struct A : Bitfield<8, 1> { };   /* accessed */
	struct D : Bitfield<9, 1> { };   /* dirty */

	/* an entry is present if it grants read or write access */
	static bool present(access_t const v) { return R::get(v) || W::get(v); }

	/* read access is always granted, write according to the page flags */
	static access_t create(Page_flags const &flags)
	{
		return R::bits(1)
		     | W::bits(flags.writeable);
	}

	/**
	 * Return descriptor value with cleared accessed and dirty flags. These
	 * flags can be set by the MMU.
	 */
	static access_t clear_mmu_flags(access_t value)
	{
		A::clear(value);
		D::clear(value);
		return value;
	}
};

/**
 * Level 1 descriptor
 */
template <unsigned _PAGE_SIZE_LOG2>
struct Level_1_descriptor;

/**
 * Base descriptor for page directories (intermediate level)
 */
struct Page_directory_base_descriptor : Common_descriptor
{
	using Common = Common_descriptor;

	struct Ps : Common::template Bitfield<7, 1> { };   /* page size */

	/* entry maps a (super-)page directly instead of referencing a table */
	static bool maps_page(access_t const v) { return Ps::get(v); }
};

/**
 * Intermediate level descriptor
 *
 * Wraps descriptors for page tables and pages.
 */
template <unsigned _PAGE_SIZE_LOG2>
struct Page_directory_descriptor : Page_directory_base_descriptor
{
	static constexpr size_t PAGE_SIZE_LOG2 = _PAGE_SIZE_LOG2;

	struct Page;
	struct Table;
};

/**
 * Level 4 descriptor
 */
template <unsigned _PAGE_SIZE_LOG2, unsigned _SIZE_LOG2>
struct Level_4_descriptor;
}
|
||||
|
||||
|
||||
template <unsigned _PAGE_SIZE_LOG2>
struct Intel::Level_1_descriptor : Common_descriptor
{
	using Common = Common_descriptor;

	static constexpr size_t PAGE_SIZE_LOG2 = _PAGE_SIZE_LOG2;

	struct Pa : Bitfield<12, 36> { };   /* physical address */

	/* create a page entry for 'pa' with the given access flags */
	static access_t create(Page_flags const &flags, addr_t const pa)
	{
		/* Ipat and Emt are ignored in legacy mode */

		return Common::create(flags)
		     | Pa::masked(pa);
	}

	/* emit a <page> XML node describing entry 'entry' at 'index' */
	static void generate_page(unsigned long index,
	                          access_t entry,
	                          Genode::Xml_generator & xml)
	{
		using Genode::Hex;
		using Hex_str = Genode::String<20>;

		xml.node("page", [&] () {
			addr_t addr = Pa::masked(entry);
			xml.attribute("index",   Hex_str(Hex(index << PAGE_SIZE_LOG2)));
			xml.attribute("value",   Hex_str(Hex(entry)));
			xml.attribute("address", Hex_str(Hex(addr)));
			xml.attribute("accessed",(bool)A::get(entry));
			xml.attribute("dirty",   (bool)D::get(entry));
			xml.attribute("write",   (bool)W::get(entry));
			xml.attribute("read",    (bool)R::get(entry));
		});
	}
};
|
||||
|
||||
|
||||
template <unsigned _PAGE_SIZE_LOG2>
struct Intel::Page_directory_descriptor<_PAGE_SIZE_LOG2>::Table
: Page_directory_base_descriptor
{
	using Base = Page_directory_base_descriptor;

	/**
	 * Physical address
	 */
	struct Pa : Base::template Bitfield<12, 36> { };

	/* reference a next-level table at 'pa' (tables get full access flags) */
	static typename Base::access_t create(addr_t const pa)
	{
		static Page_flags flags { RW, NO_EXEC, USER, NO_GLOBAL,
		                          RAM, Genode::UNCACHED };
		return Base::create(flags) | Pa::masked(pa);
	}

	/* emit a <page_directory> node and recurse into the referenced table */
	template <typename ENTRY>
	static void generate(unsigned long index,
	                     access_t entry,
	                     Genode::Xml_generator & xml,
	                     Genode::Env & env,
	                     Report_helper & report_helper)
	{
		using Genode::Hex;
		using Hex_str = Genode::String<20>;

		xml.node("page_directory", [&] () {
			addr_t pd_addr = Pa::masked(entry);
			xml.attribute("index",   Hex_str(Hex(index << PAGE_SIZE_LOG2)));
			xml.attribute("value",   Hex_str(Hex(entry)));
			xml.attribute("address", Hex_str(Hex(pd_addr)));

			report_helper.with_table<ENTRY>(pd_addr, [&] (ENTRY & pd) {
				pd.generate(xml, env, report_helper); });
		});
	}
};
|
||||
|
||||
|
||||
template <unsigned _PAGE_SIZE_LOG2>
struct Intel::Page_directory_descriptor<_PAGE_SIZE_LOG2>::Page
: Page_directory_base_descriptor
{
	using Base = Page_directory_base_descriptor;

	/**
	 * Physical address (aligned to this level's page size)
	 */
	struct Pa : Base::template Bitfield<PAGE_SIZE_LOG2,
	                                    48 - PAGE_SIZE_LOG2> { };


	/* create a (super-)page entry: same as a table entry plus the Ps bit */
	static typename Base::access_t create(Page_flags const &flags,
	                                      addr_t const pa)
	{
		/* Ipat and Emt are ignored in legacy mode */

		return Base::create(flags)
		     | Base::Ps::bits(1)
		     | Pa::masked(pa);
	}

	/* emit a <page> XML node describing entry 'entry' at 'index' */
	static void generate_page(unsigned long index,
	                          access_t entry,
	                          Genode::Xml_generator & xml)
	{
		using Genode::Hex;
		using Hex_str = Genode::String<20>;

		xml.node("page", [&] () {
			addr_t addr = Pa::masked(entry);
			xml.attribute("index",   Hex_str(Hex(index << PAGE_SIZE_LOG2)));
			xml.attribute("value",   Hex_str(Hex(entry)));
			xml.attribute("address", Hex_str(Hex(addr)));
			xml.attribute("accessed",(bool)A::get(entry));
			xml.attribute("dirty",   (bool)D::get(entry));
			xml.attribute("write",   (bool)W::get(entry));
			xml.attribute("read",    (bool)R::get(entry));
		});
	}
};
|
||||
|
||||
|
||||
template <unsigned _PAGE_SIZE_LOG2, unsigned _SIZE_LOG2>
struct Intel::Level_4_descriptor : Common_descriptor
{
	static constexpr size_t PAGE_SIZE_LOG2 = _PAGE_SIZE_LOG2;
	static constexpr size_t SIZE_LOG2      = _SIZE_LOG2;

	struct Pa : Bitfield<12, SIZE_LOG2> { };   /* physical address */

	/* reference a level-3 table (top level never maps pages directly) */
	static access_t create(addr_t const pa)
	{
		static Page_flags flags { RW, NO_EXEC, USER, NO_GLOBAL,
		                          RAM, Genode::UNCACHED };
		return Common_descriptor::create(flags) | Pa::masked(pa);
	}

	/* emit a <level4_entry> node and recurse into the level-3 table */
	template <typename ENTRY>
	static void generate(unsigned long index,
	                     access_t entry,
	                     Genode::Xml_generator & xml,
	                     Genode::Env & env,
	                     Report_helper & report_helper)
	{
		using Genode::Hex;
		using Hex_str = Genode::String<20>;

		xml.node("level4_entry", [&] () {
			addr_t level3_addr = Pa::masked(entry);
			xml.attribute("index",   Hex_str(Hex(index << PAGE_SIZE_LOG2)));
			xml.attribute("value",   Hex_str(Hex(entry)));
			xml.attribute("address", Hex_str(Hex(level3_addr)));

			report_helper.with_table<ENTRY>(level3_addr, [&] (ENTRY & level3_table) {
				level3_table.generate(xml, env, report_helper); });
		});
	}
};
|
||||
|
||||
|
||||
namespace Intel {

	/* leaf-level table of 4K pages; spans 2MB of DMA address space */
	struct Level_1_translation_table
	:
		Final_table<Level_1_descriptor<SIZE_LOG2_4KB>>
	{
		static constexpr unsigned address_width() { return SIZE_LOG2_2MB; }

		void generate(Genode::Xml_generator &, Genode::Env & env, Report_helper &);
	} __attribute__((aligned(1 << ALIGNM_LOG2)));

	/* maps 2MB super-pages or level-1 tables; spans 1GB */
	struct Level_2_translation_table
	:
		Page_directory<Level_1_translation_table,
		               Page_directory_descriptor<SIZE_LOG2_2MB>>
	{
		static constexpr unsigned address_width() { return SIZE_LOG2_1GB; }

		void generate(Genode::Xml_generator &, Genode::Env & env, Report_helper &);
	} __attribute__((aligned(1 << ALIGNM_LOG2)));

	/* maps 1GB super-pages or level-2 tables; spans 512GB */
	struct Level_3_translation_table
	:
		Page_directory<Level_2_translation_table,
		               Page_directory_descriptor<SIZE_LOG2_1GB>>
	{
		static constexpr unsigned address_width() { return SIZE_LOG2_512GB; }

		void generate(Genode::Xml_generator &, Genode::Env & env, Report_helper &);
	} __attribute__((aligned(1 << ALIGNM_LOG2)));

	/* top-level (PML4-style) table of level-3 tables; spans 256TB */
	struct Level_4_translation_table
	:
		Pml4_table<Level_3_translation_table,
		           Level_4_descriptor<SIZE_LOG2_512GB, SIZE_LOG2_256TB>>
	{
		static constexpr unsigned address_width() { return SIZE_LOG2_256TB; }

		void generate(Genode::Xml_generator &, Genode::Env & env, Report_helper &);
	} __attribute__((aligned(1 << ALIGNM_LOG2)));

}
|
||||
|
||||
#endif /* _SRC__DRIVERS__PLATFORM__PC__INTEL__PAGE_TABLE_H_ */
|
71
repos/pc/src/drivers/platform/pc/intel/report_helper.h
Normal file
71
repos/pc/src/drivers/platform/pc/intel/report_helper.h
Normal file
@ -0,0 +1,71 @@
|
||||
/*
|
||||
* \brief Helper for translating physical addresses into tables
|
||||
* \author Johannes Schlatow
|
||||
* \date 2023-08-31
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2023 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _SRC__DRIVERS__PLATFORM__INTEL__REPORT_HELPER_H_
|
||||
#define _SRC__DRIVERS__PLATFORM__INTEL__REPORT_HELPER_H_
|
||||
|
||||
/* Genode includes */
|
||||
#include <base/registry.h>
|
||||
|
||||
namespace Intel {
|
||||
using namespace Genode;
|
||||
|
||||
class Registered_translation_table;
|
||||
class Report_helper;
|
||||
|
||||
using Translation_table_registry = Registry<Registered_translation_table>;
|
||||
}
|
||||
|
||||
|
||||
class Intel::Registered_translation_table : private Translation_table_registry::Element
{
	public:

		/**
		 * Resolve a physical table address to a local virtual address
		 *
		 * \return  virtual address, or 0 if the physical address does
		 *          not belong to this table
		 */
		virtual addr_t virt_addr(addr_t) = 0;

		Registered_translation_table(Translation_table_registry & registry)
		: Translation_table_registry::Element(registry, *this)
		{ }

		virtual ~Registered_translation_table() { }
};
|
||||
|
||||
|
||||
class Intel::Report_helper
|
||||
{
|
||||
private:
|
||||
|
||||
Translation_table_registry & _registry;
|
||||
|
||||
public:
|
||||
|
||||
template <typename TABLE, typename FN>
|
||||
void with_table(addr_t phys_addr, FN && fn)
|
||||
{
|
||||
addr_t va { 0 };
|
||||
_registry.for_each([&] (Registered_translation_table & table) {
|
||||
if (!va)
|
||||
va = table.virt_addr(phys_addr);
|
||||
});
|
||||
|
||||
if (va)
|
||||
fn(*reinterpret_cast<TABLE*>(va));
|
||||
}
|
||||
|
||||
Report_helper(Translation_table_registry & registry)
|
||||
: _registry(registry)
|
||||
{ }
|
||||
|
||||
};
|
||||
|
||||
#endif /* _SRC__DRIVERS__PLATFORM__INTEL__REPORT_HELPER_H_ */
|
48
repos/pc/src/drivers/platform/pc/intel/root_table.cc
Normal file
48
repos/pc/src/drivers/platform/pc/intel/root_table.cc
Normal file
@ -0,0 +1,48 @@
|
||||
/*
|
||||
* \brief Intel IOMMU Root Table implementation
|
||||
* \author Johannes Schlatow
|
||||
* \date 2023-08-31
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2023 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
/* local includes */
|
||||
#include <intel/root_table.h>
|
||||
#include <intel/context_table.h>
|
||||
#include <intel/report_helper.h>
|
||||
|
||||
/* add a hexadecimal-formatted XML attribute */
static void attribute_hex(Genode::Xml_generator & xml, char const * name,
                          unsigned long long value)
{
	using namespace Genode;

	xml.attribute(name, String<32>(Hex(value)));
}
|
||||
|
||||
|
||||
/**
 * Report all present root-table entries as XML, descending into the
 * referenced context tables
 */
void Intel::Root_table::generate(Xml_generator & xml,
                                 Env           & env,
                                 Report_helper & report_helper)
{
	for_each([&] (uint8_t bus) {
		if (!present(bus))
			return;

		addr_t const context_phys = address(bus);

		xml.node("root_entry", [&] () {
			xml.attribute("bus", bus);
			attribute_hex(xml, "context_table", context_phys);

			/* dump context table */
			report_helper.with_table<Context_table>(context_phys,
				[&] (Context_table & context) {
					context.generate(xml, env, report_helper); });
		});
	});
}
|
87
repos/pc/src/drivers/platform/pc/intel/root_table.h
Normal file
87
repos/pc/src/drivers/platform/pc/intel/root_table.h
Normal file
@ -0,0 +1,87 @@
|
||||
/*
|
||||
* \brief Intel IOMMU Root Table implementation
|
||||
* \author Johannes Schlatow
|
||||
* \date 2023-08-31
|
||||
*
|
||||
* The root table is a page-aligned 4KB size structure. It is indexed by the
|
||||
* bus number. In legacy mode, each entry contains a context-table pointer
|
||||
* (see 9.1 and 11.4.5).
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2023 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _SRC__DRIVERS__PLATFORM__INTEL__ROOT_TABLE_H_
|
||||
#define _SRC__DRIVERS__PLATFORM__INTEL__ROOT_TABLE_H_
|
||||
|
||||
/* Genode includes */
|
||||
#include <base/env.h>
|
||||
#include <util/register.h>
|
||||
#include <util/xml_generator.h>
|
||||
|
||||
/* local includes */
|
||||
#include <clflush.h>
|
||||
|
||||
namespace Intel {
|
||||
using namespace Genode;
|
||||
|
||||
class Root_table;
|
||||
|
||||
/* forward declaration */
|
||||
class Report_helper;
|
||||
}
|
||||
|
||||
|
||||
class Intel::Root_table
|
||||
{
|
||||
private:
|
||||
|
||||
struct Entry : Genode::Register<64>
|
||||
{
|
||||
struct Present : Bitfield< 0, 1> { };
|
||||
struct Address : Bitfield<12,52> { };
|
||||
};
|
||||
|
||||
typename Entry::access_t _entries[512];
|
||||
|
||||
public:
|
||||
|
||||
template <typename FN>
|
||||
static void for_each(FN && fn)
|
||||
{
|
||||
uint8_t bus = 0;
|
||||
do {
|
||||
fn(bus);
|
||||
bus++;
|
||||
} while (bus != 0xFF);
|
||||
}
|
||||
|
||||
bool present(uint8_t bus) {
|
||||
return Entry::Present::get(_entries[bus*2]); }
|
||||
|
||||
addr_t address(uint8_t bus) {
|
||||
return Entry::Address::masked(_entries[bus*2]); }
|
||||
|
||||
void address(uint8_t bus, addr_t addr, bool flush)
|
||||
{
|
||||
_entries[bus*2] = Entry::Address::masked(addr) | Entry::Present::bits(1);
|
||||
|
||||
if (flush)
|
||||
Utils::clflush(&_entries[bus*2]);
|
||||
}
|
||||
|
||||
void generate(Xml_generator &, Env &, Report_helper &);
|
||||
|
||||
Root_table()
|
||||
{
|
||||
for (Genode::size_t i=0; i < 512; i++)
|
||||
_entries[i] = 0;
|
||||
}
|
||||
|
||||
} __attribute__((aligned(4096)));
|
||||
|
||||
#endif /* _SRC__DRIVERS__PLATFORM__INTEL__ROOT_TABLE_H_ */
|
@ -1,4 +1,4 @@
|
||||
TARGET = pc_platform_drv
# stale duplicate "REQUIRES = x86" removed; the x86_32 build is the
# only user of this target file, so require the 32bit spec directly
REQUIRES = x86_32

include $(call select_from_repositories,src/drivers/platform/target.inc)
|
88
repos/pc/src/drivers/platform/pc/spec/x86_64/main.cc
Normal file
88
repos/pc/src/drivers/platform/pc/spec/x86_64/main.cc
Normal file
@ -0,0 +1,88 @@
|
||||
/*
|
||||
* \brief Platform driver for PC
|
||||
* \author Stefan Kalkowski
|
||||
* \date 2022-10-05
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2022 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#include <base/component.h>
|
||||
|
||||
#include <common.h>
|
||||
#include <intel/io_mmu.h>
|
||||
|
||||
namespace Driver { struct Main; };
|
||||
|
||||
struct Driver::Main
{
	Env                  & _env;

	/* ROM inputs: driver config, ACPI information, and system state */
	Attached_rom_dataspace _config_rom { _env, "config" };
	Attached_rom_dataspace _acpi_rom   { _env, "acpi" };
	Attached_rom_dataspace _system_rom { _env, "system" };

	/* generic platform-driver logic shared across architectures */
	Common                 _common     { _env, _config_rom };

	Signal_handler<Main>   _config_handler { _env.ep(), *this,
	                                         &Main::_handle_config };
	Signal_handler<Main>   _system_handler { _env.ep(), *this,
	                                         &Main::_system_update };

	/* registers a factory for Intel IOMMU devices with the common logic */
	Intel::Io_mmu_factory  _intel_iommu { _env, _common.io_mmu_factories() };

	void _handle_config();
	void _reset();
	void _system_update();

	Main(Genode::Env & e)
	: _env(e)
	{
		/* install signal handlers before evaluating the initial ROM state */
		_config_rom.sigh(_config_handler);
		_acpi_rom.sigh(_system_handler);
		_system_rom.sigh(_system_handler);
		_handle_config();
		_system_update();

		/* IOMMU devices are acquired before the service becomes visible */
		_common.acquire_io_mmu_devices();
		_common.announce_service();
	}
};
|
||||
|
||||
|
||||
void Driver::Main::_handle_config()
|
||||
{
|
||||
_config_rom.update();
|
||||
_common.handle_config(_config_rom.xml());
|
||||
}
|
||||
|
||||
|
||||
void Driver::Main::_reset()
{
	/* the acpi report may carry a <reset> node with port/value to write */
	_acpi_rom.update();
	_acpi_rom.xml().with_optional_sub_node("reset", [&] (Xml_node reset)
	{
		/* defaults of 0 apply if the attributes are missing */
		uint16_t const io_port = reset.attribute_value<uint16_t>("io_port", 0);
		uint8_t  const value   = reset.attribute_value<uint8_t>("value",    0);

		log("trigger reset by writing value ", value, " to I/O port ", Hex(io_port));

		try {
			/* open a session for this single port and issue the write */
			Io_port_connection reset_port { _env, io_port, 1 };
			reset_port.outb(io_port, value);
		} catch (...) {
			/* session creation may fail, e.g., if the port is not grantable */
			error("unable to access reset I/O port ", Hex(io_port)); }
	});
}
|
||||
|
||||
|
||||
void Driver::Main::_system_update()
|
||||
{
|
||||
_system_rom.update();
|
||||
if (_system_rom.xml().attribute_value("state", String<16>()) == "reset")
|
||||
_reset();
|
||||
}
|
||||
|
||||
|
||||
/*
 * Component entry point, instantiates the driver exactly once
 */
void Component::construct(Genode::Env &env)
{
	static Driver::Main main(env);
}
|
651
repos/pc/src/drivers/platform/pc/spec/x86_64/page_table_base.h
Normal file
651
repos/pc/src/drivers/platform/pc/spec/x86_64/page_table_base.h
Normal file
@ -0,0 +1,651 @@
|
||||
/*
|
||||
* \brief x86_64 page table definitions
|
||||
* \author Adrian-Ken Rueegsegger
|
||||
* \author Johannes Schlatow
|
||||
* \date 2015-02-06
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2015-2023 Genode Labs GmbH
|
||||
*
|
||||
* This file is part of the Genode OS framework, which is distributed
|
||||
* under the terms of the GNU Affero General Public License version 3.
|
||||
*/
|
||||
|
||||
#ifndef _SRC__DRIVERS__PLATFORM__PC__SPEC__X86_64__PAGE_TABLE_BASE_H_
|
||||
#define _SRC__DRIVERS__PLATFORM__PC__SPEC__X86_64__PAGE_TABLE_BASE_H_
|
||||
|
||||
#include <base/log.h>
|
||||
#include <hw/page_flags.h>
|
||||
#include <hw/util.h>
|
||||
#include <util/misc_math.h>
|
||||
|
||||
#include <clflush.h>
|
||||
#include <expanding_page_table_allocator.h>
|
||||
|
||||
/*
 * NOTE: deliberately expands 'assert' to nothing, compiling out the
 *       range checks in the '_range_op' implementations below
 */
#define assert(expression)

namespace Genode {

	using namespace Hw;

	/**
	 * (Generic) 4-level translation structures.
	 */

	/* page/region sizes expressed as log2 of the size in bytes */
	enum {
		SIZE_LOG2_4KB = 12,
		SIZE_LOG2_2MB = 21,
		SIZE_LOG2_1GB = 30,
		SIZE_LOG2_512GB = 39,
		SIZE_LOG2_256TB = 48,
	};

	/**
	 * Final page table template
	 *
	 * The last-level page table solely maps page frames.
	 */
	template <typename DESCRIPTOR>
	class Final_table;

	/**
	 * Page directory template.
	 *
	 * Page directories can refer to paging structures of the next level
	 * or directly map page frames by using large page mappings.
	 */
	template <typename ENTRY, typename DESCRIPTOR>
	class Page_directory;

	/**
	 * The 4th-level table refers to paging structures of the next level.
	 */
	template <typename ENTRY, typename DESCRIPTOR>
	class Pml4_table;
}
|
||||
|
||||
|
||||
template <typename DESCRIPTOR>
class Genode::Final_table
{
	public:

		using Descriptor = DESCRIPTOR;

	private:

		static constexpr size_t PAGE_SIZE_LOG2 = DESCRIPTOR::PAGE_SIZE_LOG2;
		static constexpr size_t MAX_ENTRIES = 512;
		static constexpr size_t PAGE_SIZE = 1UL << PAGE_SIZE_LOG2;
		static constexpr size_t PAGE_MASK = ~((1UL << PAGE_SIZE_LOG2) - 1);

		class Misaligned {};
		class Invalid_range {};
		class Double_insertion {};

		typename DESCRIPTOR::access_t _entries[MAX_ENTRIES];

		/**
		 * Functor applied by '_range_op' to insert a single page mapping
		 */
		struct Insert_func
		{
			Page_flags const & flags;
			bool flush;

			Insert_func(Page_flags const & flags, bool flush)
			: flags(flags), flush(flush) { }

			void operator () (addr_t const vo, addr_t const pa,
			                  size_t const size,
			                  DESCRIPTOR::access_t &desc)
			{
				/* virt/phys must be page-aligned, region at least one page */
				if ((vo & ~PAGE_MASK) || (pa & ~PAGE_MASK) ||
				    size < PAGE_SIZE)
				{
					throw Invalid_range();
				}
				typename DESCRIPTOR::access_t table_entry =
					DESCRIPTOR::create(flags, pa);

				/* a present entry may only be overwritten by the same mapping */
				if (DESCRIPTOR::present(desc) &&
				    DESCRIPTOR::clear_mmu_flags(desc) != table_entry)
				{
					throw Double_insertion();
				}
				desc = table_entry;

				/* write back cache line if translation structures are not coherent */
				if (flush)
					Utils::clflush(&desc);
			}
		};

		/**
		 * Functor applied by '_range_op' to clear a single page mapping
		 */
		struct Remove_func
		{
			bool flush;

			Remove_func(bool flush) : flush(flush) { }

			void operator () (addr_t /* vo */, addr_t /* pa */, size_t /* size */,
			                  DESCRIPTOR::access_t &desc)
			{
				desc = 0;

				if (flush)
					Utils::clflush(&desc);
			}
		};

		/**
		 * Apply 'func' to every entry covered by the region [vo, vo+size)
		 */
		template <typename FUNC>
		void _range_op(addr_t vo, addr_t pa, size_t size, FUNC &&func)
		{
			for (size_t i = vo >> PAGE_SIZE_LOG2; size > 0;
			     i = vo >> PAGE_SIZE_LOG2) {
				assert (i < MAX_ENTRIES);
				addr_t end = (vo + PAGE_SIZE) & PAGE_MASK;
				size_t sz = Genode::min(size, end-vo);

				func(vo, pa, sz, _entries[i]);

				/* check whether we wrap */
				if (end < vo) return;

				size = size - sz;
				vo += sz;
				pa += sz;
			}
		}

	public:

		static constexpr size_t MIN_PAGE_SIZE_LOG2 = SIZE_LOG2_4KB;
		static constexpr size_t ALIGNM_LOG2 = SIZE_LOG2_4KB;

		/**
		 * A page table consists of 512 entries that each maps a 4KB page
		 * frame. For further details refer to Intel SDM Vol. 3A, table 4-19.
		 */
		Final_table()
		{
			if (!Hw::aligned(this, ALIGNM_LOG2)) throw Misaligned();
			Genode::memset(&_entries, 0, sizeof(_entries));
		}

		/**
		 * Returns True if table does not contain any page mappings.
		 */
		bool empty()
		{
			for (unsigned i = 0; i < MAX_ENTRIES; i++)
				if (DESCRIPTOR::present(_entries[i]))
					return false;
			return true;
		}

		/**
		 * Apply 'fn(index, entry)' to every present entry
		 */
		template <typename FN>
		void for_each_entry(FN && fn)
		{
			for (unsigned long i = 0; i < MAX_ENTRIES; i++) {
				if (Descriptor::present(_entries[i]))
					fn(i, _entries[i]);
			}
		}

		/**
		 * Insert translations into this table
		 *
		 * \param vo              offset of the virtual region represented
		 *                        by the translation within the virtual
		 *                        region represented by this table
		 * \param pa              base of the physical backing store
		 * \param size            size of the translated region
		 * \param flags           mapping flags
		 * \param alloc           second level translation table allocator
		 *                        (unused at the last level)
		 * \param flush           flush cache lines of table entries
		 * \param supported_sizes supported page sizes (unused, only 4KB here)
		 */
		template <typename ALLOCATOR>
		void insert_translation(addr_t vo, addr_t pa, size_t size,
		                        Page_flags const & flags, ALLOCATOR &,
		                        bool flush, uint32_t)
		{
			this->_range_op(vo, pa, size, Insert_func(flags, flush));
		}

		/**
		 * Remove translations that overlap with a given virtual region
		 *
		 * \param vo    region offset within the tables virtual region
		 * \param size  region size
		 * \param alloc second level translation table allocator (unused)
		 */
		template <typename ALLOCATOR>
		void remove_translation(addr_t vo, size_t size, ALLOCATOR &, bool flush)
		{
			this->_range_op(vo, 0, size, Remove_func(flush));
		}

} __attribute__((aligned(1 << ALIGNM_LOG2)));
|
||||
|
||||
|
||||
template <typename ENTRY, typename DESCRIPTOR>
class Genode::Page_directory
{
	public:

		using Descriptor = DESCRIPTOR;
		using Entry = ENTRY;

	private:

		static constexpr size_t PAGE_SIZE_LOG2 = Descriptor::PAGE_SIZE_LOG2;
		static constexpr size_t MAX_ENTRIES = 512;
		static constexpr size_t PAGE_SIZE = 1UL << PAGE_SIZE_LOG2;
		static constexpr size_t PAGE_MASK = ~((1UL << PAGE_SIZE_LOG2) - 1);

		class Misaligned {};
		class Invalid_range {};
		class Double_insertion {};

		typename Descriptor::access_t _entries[MAX_ENTRIES];

		/**
		 * Functor applied by '_range_op': inserts either a large-page
		 * mapping or descends into a (possibly newly created) next-level
		 * table
		 */
		template <typename ALLOCATOR>
		struct Insert_func
		{
			Page_flags const & flags;
			ALLOCATOR & alloc;
			bool flush;
			uint32_t supported_sizes;

			Insert_func(Page_flags const & flags, ALLOCATOR & alloc, bool flush,
			            uint32_t supported_sizes)
			: flags(flags), alloc(alloc), flush(flush),
			  supported_sizes(supported_sizes)
			{ }

			void operator () (addr_t const vo, addr_t const pa,
			                  size_t const size,
			                  typename Descriptor::access_t &desc)
			{
				using Td = Descriptor::Table;
				using access_t = typename Descriptor::access_t;

				/* can we insert a large page mapping? */
				if ((supported_sizes & PAGE_SIZE) &&
				    !((vo & ~PAGE_MASK) || (pa & ~PAGE_MASK) || size < PAGE_SIZE))
				{
					access_t table_entry = Descriptor::Page::create(flags, pa);

					/* a present entry may only repeat the identical mapping */
					if (Descriptor::present(desc) &&
					    Descriptor::clear_mmu_flags(desc) != table_entry) {
						throw Double_insertion(); }

					desc = table_entry;
					if (flush)
						Utils::clflush(&desc);
					return;
				}

				/* we need to use a next level table */
				if (!Descriptor::present(desc)) {

					/* create and link next level table */
					addr_t table_phys = alloc.template construct<ENTRY>();
					desc = (access_t) Td::create(table_phys);

					if (flush)
						Utils::clflush(&desc);

				} else if (Descriptor::maps_page(desc)) {
					/* entry already maps a large page */
					throw Double_insertion();
				}

				/* insert translation */
				alloc.template with_table<ENTRY>(Td::Pa::masked(desc),
					[&] (ENTRY & table) {
						table.insert_translation(vo - (vo & PAGE_MASK), pa, size,
						                         flags, alloc, flush, supported_sizes);
					},
					[&] () {
						error("Unable to get mapped table address for ",
						      Genode::Hex(Td::Pa::masked(desc)));
					});
			}
		};

		/**
		 * Functor applied by '_range_op': removes mappings and destructs
		 * next-level tables that become empty
		 */
		template <typename ALLOCATOR>
		struct Remove_func
		{
			ALLOCATOR & alloc;
			bool flush;

			Remove_func(ALLOCATOR & alloc, bool flush)
			: alloc(alloc), flush(flush) { }

			void operator () (addr_t const vo, addr_t /* pa */,
			                  size_t const size,
			                  typename Descriptor::access_t &desc)
			{
				if (Descriptor::present(desc)) {
					if (Descriptor::maps_page(desc)) {
						desc = 0;
					} else {
						using Td = Descriptor::Table;

						/* use allocator to retrieve virt address of table */
						addr_t table_phys = Td::Pa::masked(desc);

						alloc.template with_table<ENTRY>(table_phys,
							[&] (ENTRY & table) {
								addr_t const table_vo = vo - (vo & PAGE_MASK);
								table.remove_translation(table_vo, size, alloc, flush);
								if (table.empty()) {
									alloc.template destruct<ENTRY>(table_phys);
									desc = 0;
								}
							},
							[&] () {
								error("Unable to get mapped table address for ",
								      Genode::Hex(table_phys));
							});
					}

					/* only flush when the entry was actually cleared */
					if (desc == 0 && flush)
						Utils::clflush(&desc);
				}
			}
		};

		/**
		 * Apply 'func' to every entry covered by the region [vo, vo+size)
		 */
		template <typename FUNC>
		void _range_op(addr_t vo, addr_t pa, size_t size, FUNC &&func)
		{
			for (size_t i = vo >> PAGE_SIZE_LOG2; size > 0;
			     i = vo >> PAGE_SIZE_LOG2)
			{
				assert (i < MAX_ENTRIES);
				addr_t end = (vo + PAGE_SIZE) & PAGE_MASK;
				size_t sz = Genode::min(size, end-vo);

				func(vo, pa, sz, _entries[i]);

				/* check whether we wrap */
				if (end < vo) return;

				size = size - sz;
				vo += sz;
				pa += sz;
			}
		}

	public:

		static constexpr size_t MIN_PAGE_SIZE_LOG2 = SIZE_LOG2_4KB;
		static constexpr size_t ALIGNM_LOG2 = SIZE_LOG2_4KB;

		Page_directory()
		{
			if (!Hw::aligned(this, ALIGNM_LOG2)) throw Misaligned();
			Genode::memset(&_entries, 0, sizeof(_entries));
		}

		/**
		 * Returns True if table does not contain any page mappings.
		 *
		 * \return false if an entry is present, True otherwise
		 */
		bool empty()
		{
			for (unsigned i = 0; i < MAX_ENTRIES; i++)
				if (Descriptor::present(_entries[i]))
					return false;
			return true;
		}

		/**
		 * Apply 'fn(index, entry)' to every present entry
		 */
		template <typename FN>
		void for_each_entry(FN && fn)
		{
			for (unsigned long i = 0; i < MAX_ENTRIES; i++)
				if (Descriptor::present(_entries[i]))
					fn(i, _entries[i]);
		}

		/**
		 * Insert translations into this table
		 *
		 * \param vo              offset of the virtual region represented
		 *                        by the translation within the virtual
		 *                        region represented by this table
		 * \param pa              base of the physical backing store
		 * \param size            size of the translated region
		 * \param flags           mapping flags
		 * \param alloc           second level translation table allocator
		 * \param flush           flush cache lines of table entries
		 * \param supported_sizes supported page sizes
		 */
		template <typename ALLOCATOR>
		void insert_translation(addr_t vo, addr_t pa, size_t size,
		                        Page_flags const & flags, ALLOCATOR & alloc,
		                        bool flush, uint32_t supported_sizes) {
			_range_op(vo, pa, size,
			          Insert_func(flags, alloc, flush, supported_sizes)); }

		/**
		 * Remove translations that overlap with a given virtual region
		 *
		 * \param vo    region offset within the tables virtual region
		 * \param size  region size
		 * \param alloc second level translation table allocator
		 */
		template <typename ALLOCATOR>
		void remove_translation(addr_t vo, size_t size, ALLOCATOR & alloc,
		                        bool flush) {
			_range_op(vo, 0, size, Remove_func(alloc, flush)); }
};
|
||||
|
||||
|
||||
template <typename ENTRY, typename DESCRIPTOR>
class Genode::Pml4_table
{
	public:

		using Descriptor = DESCRIPTOR;
		using Entry = ENTRY;

	private:

		static constexpr size_t PAGE_SIZE_LOG2 = Descriptor::PAGE_SIZE_LOG2;
		static constexpr size_t SIZE_LOG2 = Descriptor::SIZE_LOG2;
		static constexpr size_t SIZE_MASK = (1UL << SIZE_LOG2) - 1;
		static constexpr size_t MAX_ENTRIES = 512;
		static constexpr size_t PAGE_SIZE = 1UL << PAGE_SIZE_LOG2;
		static constexpr size_t PAGE_MASK = ~((1UL << PAGE_SIZE_LOG2) - 1);

		class Misaligned {};
		class Invalid_range {};

		typename Descriptor::access_t _entries[MAX_ENTRIES];

		/**
		 * Functor applied by '_range_op': the top level never maps pages
		 * directly, it always descends into a next-level table
		 */
		template <typename ALLOCATOR>
		struct Insert_func
		{
			Page_flags const & flags;
			ALLOCATOR & alloc;
			bool flush;
			uint32_t supported_sizes;

			Insert_func(Page_flags const & flags,
			            ALLOCATOR & alloc, bool flush, uint32_t supported_sizes)
			: flags(flags), alloc(alloc), flush(flush),
			  supported_sizes(supported_sizes) { }

			void operator () (addr_t const vo, addr_t const pa,
			                  size_t const size,
			                  Descriptor::access_t &desc)
			{
				/* we need to use a next level table */
				if (!Descriptor::present(desc)) {
					/* create and link next level table */
					addr_t table_phys = alloc.template construct<ENTRY>();
					desc = Descriptor::create(table_phys);

					/* write back if translation structures are not coherent */
					if (flush)
						Utils::clflush(&desc);
				}

				/* insert translation */
				addr_t table_phys = Descriptor::Pa::masked(desc);
				alloc.template with_table<ENTRY>(table_phys,
					[&] (ENTRY & table) {
						addr_t const table_vo = vo - (vo & PAGE_MASK);
						table.insert_translation(table_vo, pa, size, flags, alloc,
						                         flush, supported_sizes);
					},
					[&] () {
						error("Unable to get mapped table address for ",
						      Genode::Hex(table_phys));
					});
			}
		};

		/**
		 * Functor applied by '_range_op': removes translations and
		 * destructs next-level tables that become empty
		 */
		template <typename ALLOCATOR>
		struct Remove_func
		{
			ALLOCATOR & alloc;
			bool flush;

			Remove_func(ALLOCATOR & alloc, bool flush)
			: alloc(alloc), flush(flush) { }

			void operator () (addr_t const vo, addr_t /* pa */,
			                  size_t const size,
			                  Descriptor::access_t &desc)
			{
				if (Descriptor::present(desc)) {
					/* use allocator to retrieve virt address of table */
					addr_t table_phys = Descriptor::Pa::masked(desc);
					alloc.template with_table<ENTRY>(table_phys,
						[&] (ENTRY & table) {
							addr_t const table_vo = vo - (vo & PAGE_MASK);
							table.remove_translation(table_vo, size, alloc, flush);
							if (table.empty()) {
								alloc.template destruct<ENTRY>(table_phys);
								desc = 0;

								if (flush)
									Utils::clflush(&desc);
							}
						},
						[&] () {
							error("Unable to get mapped table address for ",
							      Genode::Hex(table_phys));
						});
				}
			}
		};

		/**
		 * Apply 'func' to every entry covered by the region [vo, vo+size);
		 * indices are computed from the canonical address via SIZE_MASK
		 */
		template <typename FUNC>
		void _range_op(addr_t vo, addr_t pa, size_t size, FUNC &&func)
		{
			for (size_t i = (vo & SIZE_MASK) >> PAGE_SIZE_LOG2; size > 0;
			     i = (vo & SIZE_MASK) >> PAGE_SIZE_LOG2) {
				assert (i < MAX_ENTRIES);
				addr_t end = (vo + PAGE_SIZE) & PAGE_MASK;
				size_t sz = Genode::min(size, end-vo);

				func(vo, pa, sz, _entries[i]);

				/* check whether we wrap */
				if (end < vo) return;

				size = size - sz;
				vo += sz;
				pa += sz;
			}
		}

	protected:

		/**
		 * Return how many entries of an alignment fit into region
		 */
		static constexpr size_t _count(size_t region, size_t alignment)
		{
			return Genode::align_addr<size_t>(region, (int)alignment)
			       / (1UL << alignment);
		}

	public:

		static constexpr size_t MIN_PAGE_SIZE_LOG2 = SIZE_LOG2_4KB;
		static constexpr size_t ALIGNM_LOG2 = SIZE_LOG2_4KB;

		Pml4_table()
		{
			if (!Hw::aligned(this, ALIGNM_LOG2)) throw Misaligned();
			Genode::memset(&_entries, 0, sizeof(_entries));
		}

		/**
		 * Share the kernel part of the address space by copying all
		 * entries from 0xffffffc000000000 upwards from 'kernel_table'
		 */
		explicit Pml4_table(Pml4_table & kernel_table) : Pml4_table()
		{
			static size_t first = (0xffffffc000000000 & SIZE_MASK) >> PAGE_SIZE_LOG2;
			for (size_t i = first; i < MAX_ENTRIES; i++)
				_entries[i] = kernel_table._entries[i];
		}

		/**
		 * Returns True if table does not contain any page mappings.
		 *
		 * \return false if an entry is present, True otherwise
		 */
		bool empty()
		{
			for (unsigned i = 0; i < MAX_ENTRIES; i++)
				if (Descriptor::present(_entries[i]))
					return false;
			return true;
		}

		/**
		 * Apply 'fn(index, entry)' to every present entry
		 */
		template <typename FN>
		void for_each_entry(FN && fn)
		{
			for (unsigned long i = 0; i < MAX_ENTRIES; i++) {
				if (Descriptor::present(_entries[i]))
					fn(i, _entries[i]);
			}
		}

		/**
		 * Insert translations into this table
		 *
		 * \param vo              offset of the virtual region represented
		 *                        by the translation within the virtual
		 *                        region represented by this table
		 * \param pa              base of the physical backing store
		 * \param size            size of the translated region
		 * \param flags           mapping flags
		 * \param alloc           second level translation table allocator
		 * \param flush           flush cache lines of table entries
		 * \param supported_sizes supported page sizes
		 */
		template <typename ALLOCATOR>
		void insert_translation(addr_t vo, addr_t pa, size_t size,
		                        Page_flags const & flags, ALLOCATOR & alloc,
		                        bool flush, uint32_t supported_sizes) {
			_range_op(vo, pa, size,
			          Insert_func(flags, alloc, flush, supported_sizes)); }

		/**
		 * Remove translations that overlap with a given virtual region
		 *
		 * \param vo    region offset within the tables virtual region
		 * \param size  region size
		 * \param alloc second level translation table allocator
		 */
		template <typename ALLOCATOR>
		void remove_translation(addr_t vo, size_t size, ALLOCATOR & alloc,
		                        bool flush)
		{
			_range_op(vo, 0, size, Remove_func(alloc, flush));
		}

} __attribute__((aligned(1 << ALIGNM_LOG2)));
|
||||
|
||||
#endif /* _SRC__DRIVERS__PLATFORM__PC__SPEC__X86_64__PAGE_TABLE_BASE_H_ */
|
14
repos/pc/src/drivers/platform/pc/spec/x86_64/target.mk
Normal file
14
repos/pc/src/drivers/platform/pc/spec/x86_64/target.mk
Normal file
@ -0,0 +1,14 @@
|
||||
TARGET = pc_platform_drv
REQUIRES = x86_64

# generic platform-driver build rules shared across architectures
include $(call select_from_repositories,src/drivers/platform/target.inc)

# Intel IOMMU (DMA-remapping) support
SRC_CC += intel/root_table.cc
SRC_CC += intel/context_table.cc
SRC_CC += intel/managed_root_table.cc
SRC_CC += intel/io_mmu.cc
SRC_CC += intel/page_table.cc

# sources live two directories up (shared pc platform-driver directory)
INC_DIR += $(PRG_DIR)/../../

vpath intel/%.cc $(PRG_DIR)/../../
|
Loading…
Reference in New Issue
Block a user