Mirror of https://github.com/genodelabs/genode.git, synced 2025-03-15 00:36:34 +00:00
parent 4790bc03fc
commit 3c5b88111c
@@ -118,3 +118,86 @@ void Intel::Register_invalidator::invalidate_all(Domain_id domain_id, Pci::rid_t
	invalidate_iotlb(domain_id);
}


/**
 * Clear IOTLB.
 *
 * By default, we perform a global invalidation. When provided with a valid
 * Domain_id, a domain-specific invalidation is conducted.
 *
 * See Table 25 for required invalidation scopes.
 */
void Intel::Queued_invalidator::invalidate_iotlb(Domain_id domain_id)
{
	unsigned requested_scope = Descriptor::Granularity::GLOBAL;
	if (domain_id.valid())
		requested_scope = Descriptor::Granularity::DOMAIN;

	/* clear IOTLB */
	Descriptor::access_t *entry = _tail();
	Iotlb::Type::set(*entry, Iotlb::Type::IOTLB);
	Iotlb::Granularity::set(*entry, requested_scope);
	Iotlb::Dr::set(*entry, 1);
	Iotlb::Dw::set(*entry, 1);
	Iotlb::Domain_id::set(*entry, domain_id.value);

	_next();

	/* wait for completion */
	while (!_empty());

	/*
	 * Note: At the moment we have no practical benefit from implementing
	 * page-selective invalidation, because
	 * a) When adding a DMA buffer range, invalidation is only required if
	 * caching mode is set. This is not supposed to occur on real hardware but
	 * only in emulators.
	 * b) Removal of DMA buffer ranges typically occurs only when a domain is
	 * destructed. In this case, invalidation is not issued for individual
	 * buffers but for the entire domain once all buffer ranges have been
	 * removed.
	 */
}


/**
 * Clear context cache
 *
 * By default, we perform a global invalidation. When provided with a valid
 * Domain_id, a domain-specific invalidation is conducted. When a rid is
 * provided, a device-specific invalidation is done.
 *
 * See Table 25 for required invalidation scopes.
 */
void Intel::Queued_invalidator::invalidate_context(Domain_id domain_id, Pci::rid_t rid)
{
	unsigned requested_scope = Descriptor::Granularity::GLOBAL;
	if (domain_id.valid())
		requested_scope = Descriptor::Granularity::DOMAIN;

	if (rid != 0)
		requested_scope = Descriptor::Granularity::DEVICE_OR_PAGE;

	/* clear context cache */
	Descriptor::access_t *entry = _tail();
	Context::Type::set(*entry, Context::Type::CONTEXT);
	Context::Granularity::set(*entry, requested_scope);
	Context::Source_id::set(*entry, rid);
	Context::Domain_id::set(*entry, domain_id.value);

	_next();

	/* wait for completion */
	while (!_empty());
}


void Intel::Queued_invalidator::invalidate_all(Domain_id domain_id, Pci::rid_t rid)
{
	invalidate_context(domain_id, rid);

	/* XXX clear PASID cache if we ever switch from legacy mode translation */

	invalidate_iotlb(domain_id);
}
@@ -16,6 +16,7 @@
/* Genode includes */
#include <util/mmio.h>
#include <base/attached_ram_dataspace.h>
#include <pci/types.h>

/* local includes */
@@ -27,6 +28,7 @@ namespace Intel {
	class Io_mmu; /* forward declaration */
	class Invalidator;
	class Register_invalidator;
	class Queued_invalidator;
}
@@ -127,4 +129,113 @@ class Intel::Register_invalidator : public Invalidator
};


class Intel::Queued_invalidator : public Invalidator
{
	private:

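		/*
		 * Invalidation-queue head, tail, and address registers (IQH, IQT,
		 * and IQA in the VT-d specification), mapped relative to the
		 * queue-register base passed to the constructor
		 */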
		struct Queue_mmio : Mmio<56>
		{
			struct Head : Register<0x0, 64>
			{
			};

			struct Tail : Register<0x8, 64>
			{
			};

			struct Address : Register<0x10, 64>
			{
				struct Size : Bitfield< 0,3> { enum { SIZE_4K = 0}; };
				struct Width : Bitfield<11,1> { enum { WIDTH_128 = 0}; };
				struct Base : Bitfield<12,52> { };
			};

			Queue_mmio(Byte_range_ptr const &range)
			: Mmio<56>(range)
			{ }

		} _queue_mmio;

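		/* backing store for the invalidation queue (one 4K page, see constructor) */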
		Attached_ram_dataspace _queue;

		/*
		 * descriptor definitions
		 */

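		/*
		 * Note that only the lower 64 bit of each 128-bit descriptor are
		 * modelled here. The upper half carries the address for
		 * page-selective invalidations, which are not implemented (see the
		 * note in invalidate_iotlb).
		 */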
		struct Descriptor : Genode::Register<64>
		{
			struct Type_0_3 : Bitfield<0,4> { };
			struct Type_4_6 : Bitfield<9,3> { };
			struct Type : Bitset_2<Type_0_3, Type_4_6>
			{
				enum {
					CONTEXT = 1,
					IOTLB = 2,
				};
			};

			struct Granularity : Bitfield<4,2> {
				enum {
					GLOBAL = 0x1,
					DOMAIN = 0x2,
					DEVICE_OR_PAGE = 0x3
				};
			};

			struct Domain_id : Bitfield<16,16> { };
		};

		struct Context : Descriptor
		{
			struct Source_id : Bitfield<32,16> { };
		};

		struct Iotlb : Descriptor
		{
			/* drain reads/writes */
			struct Dw : Bitfield<6,1> { };
			struct Dr : Bitfield<7,1> { };
		};

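		/*
		 * The hardware advances the head register as it processes queued
		 * descriptors, hence the queue is drained once head equals tail.
		 */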
		bool _empty() {
			return _queue_mmio.read<Queue_mmio::Head>() == _queue_mmio.read<Queue_mmio::Tail>(); }

		Descriptor::access_t *_tail() {
			return (Descriptor::access_t*)(_queue_mmio.read<Queue_mmio::Tail>() + (addr_t)_queue.local_addr<void>()); }

		void _next()
		{
			addr_t tail_offset = _queue_mmio.read<Queue_mmio::Tail>();

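			/* each descriptor occupies 16 bytes, wrap around at the 4K queue boundary */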
			tail_offset += 16;
			if (tail_offset >= 0x1000)
				tail_offset = 0;

			_queue_mmio.write<Queue_mmio::Tail>(tail_offset);
		}

	public:

		void invalidate_iotlb(Domain_id) override;
		void invalidate_context(Domain_id domain, Pci::rid_t) override;
		void invalidate_all(Domain_id domain = Domain_id { Domain_id::INVALID },
		                    Pci::rid_t = 0) override;

		Queued_invalidator(Genode::Env & env, addr_t queue_reg_base)
		: _queue_mmio({(char*)queue_reg_base, 56}),
		  _queue(env.ram(), env.rm(), 4096, Cache::CACHED)
		{
			/* set tail register to zero */
			_queue_mmio.write<Queue_mmio::Tail>(0);

			/* write physical address of invalidation queue into address register */
			using Address = Queue_mmio::Address;
			addr_t queue_paddr = env.pd().dma_addr(_queue.cap());
			_queue_mmio.write<Address>(Address::Base::masked(queue_paddr) |
			                           Address::Size::bits(Address::Size::SIZE_4K) |
			                           Address::Width::bits(Address::Width::WIDTH_128));
		}
};


#endif /* _SRC__DRIVERS__PLATFORM__INTEL__INVALIDATOR_H_ */
@@ -138,7 +138,10 @@ void Intel::Io_mmu::flush_write_buffer()
 Intel::Invalidator & Intel::Io_mmu::invalidator()
 {
-	return *_register_invalidator;
+	if (!read<Global_status::Qies>())
+		return *_register_invalidator;
+	else
+		return *_queued_invalidator;
 }
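The accessor above lets callers stay agnostic of which invalidation interface is currently active. As a hypothetical illustration (not part of this commit; the call site, names, and visibility are assumptions), a domain flush could be driven like this:

	/* hypothetical sketch: flush a domain's cached translations via whatever interface is active */
	void flush_domain(Intel::Io_mmu & iommu, Domain_id domain, Pci::rid_t rid)
	{
		iommu.invalidator().invalidate_all(domain, rid);
	}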
@@ -154,6 +157,12 @@ void Intel::Io_mmu::_handle_faults()
	if (read<Fault_status::Iqe>())
		error("Invalidation queue error");

	if (read<Fault_status::Ice>())
		error("Invalidation completion error");

	if (read<Fault_status::Ite>())
		error("Invalidation time-out error");

	/* acknowledge all faults */
	write<Fault_status>(0x7d);
@@ -336,6 +345,12 @@ void Intel::Io_mmu::resume()
	if (read<Global_status::Enabled>() && read<Global_status::Qies>())
		_global_command<Global_command::Qie>(false);

	if (read<Extended_capability::Qi>()) {
		/* enable queued invalidation if supported */
		_queued_invalidator.construct(_env, base() + 0x80);
		_global_command<Global_command::Qie>(true);
	}

	/* restore fault events only if kernel did not enable IRQ remapping */
	if (!read<Global_status::Ires>()) {
		write<Fault_event_control>(_s3_fec);
@@ -386,12 +401,14 @@ Intel::Io_mmu::Io_mmu(Env & env,
		/* disable queued invalidation interface */
		if (read<Global_status::Qies>())
			_global_command<Global_command::Qie>(false);
	}

	if (read<Version::Major>() > 5) {
		error("Register-based invalidation only ",
		      "supported in architecture versions 5 and lower");
	}

	if (read<Extended_capability::Qi>()) {
		/* enable queued invalidation if supported */
		_queued_invalidator.construct(_env, base() + 0x80);
		_global_command<Global_command::Qie>(true);
	} else {
		/* use register-based invalidation interface as fallback */
		addr_t context_reg_base = base() + 0x28;
		addr_t iotlb_reg_base = base() + 8*_offset<Extended_capability::Iro>();
		_register_invalidator.construct(context_reg_base, iotlb_reg_base, _verbose);
@@ -195,6 +195,7 @@ class Intel::Io_mmu : private Attached_mmio<0x800>,
		                  _env.ep(), *this, &Io_mmu::_handle_faults };

		Constructible<Register_invalidator> _register_invalidator { };
		Constructible<Queued_invalidator> _queued_invalidator { };

		/**
		 * Registers
@@ -243,6 +244,9 @@ class Intel::Io_mmu : private Attached_mmio<0x800>,
			/* interrupt remapping support */
			struct Ir : Bitfield<3,1> { };

			/* queued-invalidation support */
			struct Qi : Bitfield<1,1> { };

			struct Page_walk_coherency : Bitfield<0,1> { };
		};
@@ -297,6 +301,12 @@ class Intel::Io_mmu : private Attached_mmio<0x800>,
			/* fault record index */
			struct Fri : Bitfield<8,8> { };

			/* invalidation time-out error */
			struct Ite : Bitfield<6,1> { };

			/* invalidation completion error */
			struct Ice : Bitfield<5,1> { };

			/* invalidation queue error */
			struct Iqe : Bitfield<4,1> { };