diff --git a/repos/pc/src/driver/platform/pc/intel/invalidator.cc b/repos/pc/src/driver/platform/pc/intel/invalidator.cc index dfa9ee9641..72e94f94b0 100644 --- a/repos/pc/src/driver/platform/pc/intel/invalidator.cc +++ b/repos/pc/src/driver/platform/pc/intel/invalidator.cc @@ -118,3 +118,86 @@ void Intel::Register_invalidator::invalidate_all(Domain_id domain_id, Pci::rid_t invalidate_iotlb(domain_id); } + + +/** + * Clear IOTLB. + * + * By default, we perform a global invalidation. When provided with a valid + * Domain_id, a domain-specific invalidation is conducted. + * + * See Table 25 for required invalidation scopes. + */ +void Intel::Queued_invalidator::invalidate_iotlb(Domain_id domain_id) +{ + unsigned requested_scope = Descriptor::Granularity::GLOBAL; + if (domain_id.valid()) + requested_scope = Descriptor::Granularity::DOMAIN; + + /* clear IOTLB */ + Descriptor::access_t *entry = _tail(); + Iotlb::Type::set(*entry, Iotlb::Type::IOTLB); + Iotlb::Granularity::set(*entry, requested_scope); + Iotlb::Dr::set(*entry, 1); + Iotlb::Dw::set(*entry, 1); + Iotlb::Domain_id::set(*entry, domain_id.value); + + _next(); + + /* wait for completion */ + while (!_empty()); + + /* + * Note: At the moment we have no practical benefit from implementing + * page-selective invalidation, because + * a) When adding a DMA buffer range, invalidation is only required if + * caching mode is set. This is not supposed to occur on real hardware but + * only in emulators. + * b) Removal of DMA buffer ranges typically occurs only when a domain is + * destructed. In this case, invalidation is not issued for individual + * buffers but for the entire domain once all buffer ranges have been + * removed. + */ +} + + +/** + * Clear context cache + * + * By default, we perform a global invalidation. When provided with a valid + * Domain_id, a domain-specific invalidation is conducted. When a rid is + * provided, a device-specific invalidation is done.
+ * + * See Table 25 for required invalidation scopes. + */ +void Intel::Queued_invalidator::invalidate_context(Domain_id domain_id, Pci::rid_t rid) +{ + unsigned requested_scope = Descriptor::Granularity::GLOBAL; + if (domain_id.valid()) + requested_scope = Descriptor::Granularity::DOMAIN; + + if (rid != 0) + requested_scope = Descriptor::Granularity::DEVICE_OR_PAGE; + + /* clear context cache */ + Descriptor::access_t *entry = _tail(); + Context::Type::set(*entry, Context::Type::CONTEXT); + Context::Granularity::set(*entry, requested_scope); + Context::Source_id::set(*entry, rid); + Context::Domain_id::set(*entry, domain_id.value); + + _next(); + + /* wait for completion */ + while (!_empty()); +} + + +void Intel::Queued_invalidator::invalidate_all(Domain_id domain_id, Pci::rid_t rid) +{ + invalidate_context(domain_id, rid); + + /* XXX clear PASID cache if we ever switch from legacy mode translation */ + + invalidate_iotlb(domain_id); +} diff --git a/repos/pc/src/driver/platform/pc/intel/invalidator.h b/repos/pc/src/driver/platform/pc/intel/invalidator.h index 3cb5ea3978..4619090cc7 100644 --- a/repos/pc/src/driver/platform/pc/intel/invalidator.h +++ b/repos/pc/src/driver/platform/pc/intel/invalidator.h @@ -16,6 +16,7 @@ /* Genode includes */ #include +#include #include /* local includes */ @@ -27,6 +28,7 @@ namespace Intel { class Io_mmu; /* forward declaration */ class Invalidator; class Register_invalidator; + class Queued_invalidator; } @@ -127,4 +129,113 @@ class Intel::Register_invalidator : public Invalidator }; +class Intel::Queued_invalidator : public Invalidator +{ + private: + + struct Queue_mmio : Mmio<56> + { + struct Head : Register<0x0, 64> + { + }; + + struct Tail : Register<0x8, 64> + { + }; + + struct Address : Register<0x10, 64> + { + struct Size : Bitfield< 0,3> { enum { SIZE_4K = 0}; }; + struct Width : Bitfield<11,1> { enum { WIDTH_128 = 0}; }; + struct Base : Bitfield<12,52> { }; + }; + + Queue_mmio(Byte_range_ptr const &range) + : 
Mmio<56>(range) + { } + + } _queue_mmio; + + Attached_ram_dataspace _queue; + + /* + * descriptor definitions + */ + + struct Descriptor : Genode::Register<64> + { + struct Type_0_3 : Bitfield<0,4> { }; + struct Type_4_6 : Bitfield<9,3> { }; + struct Type : Bitset_2<Type_0_3, Type_4_6> + { + enum { + CONTEXT = 1, + IOTLB = 2, + }; + }; + + struct Granularity : Bitfield<4,2> { + enum { + GLOBAL = 0x1, + DOMAIN = 0x2, + DEVICE_OR_PAGE = 0x3 + }; + }; + + struct Domain_id : Bitfield<16,16> { }; + }; + + struct Context : Descriptor + { + struct Source_id : Bitfield<32,16> { }; + }; + + struct Iotlb : Descriptor + { + /* drain reads/writes */ + struct Dw : Bitfield<6,1> { }; + struct Dr : Bitfield<7,1> { }; + }; + + bool _empty() { + return _queue_mmio.read<Queue_mmio::Head>() == _queue_mmio.read<Queue_mmio::Tail>(); } + + Descriptor::access_t *_tail() { + return (Descriptor::access_t*)(_queue_mmio.read<Queue_mmio::Tail>() + (addr_t)_queue.local_addr<void>()); } + + void _next() + { + addr_t tail_offset = _queue_mmio.read<Queue_mmio::Tail>(); + + tail_offset += 16; + if (tail_offset >= 0x1000) + tail_offset = 0; + + _queue_mmio.write<Queue_mmio::Tail>(tail_offset); + } + + public: + + void invalidate_iotlb(Domain_id) override; + void invalidate_context(Domain_id domain, Pci::rid_t) override; + void invalidate_all(Domain_id domain = Domain_id { Domain_id::INVALID }, + Pci::rid_t = 0) override; + + Queued_invalidator(Genode::Env & env, addr_t queue_reg_base) + : _queue_mmio({(char*)queue_reg_base, 56}), + _queue(env.ram(), env.rm(), 4096, Cache::CACHED) + { + /* set tail register to zero */ + _queue_mmio.write<Queue_mmio::Tail>(0); + + /* write physical address of invalidation queue into address register */ + using Address = Queue_mmio::Address; + addr_t queue_paddr = env.pd().dma_addr(_queue.cap()); + _queue_mmio.write<Address>
(Address::Base::masked(queue_paddr) | + Address::Size::bits(Address::Size::SIZE_4K) | + Address::Width::bits(Address::Width::WIDTH_128)); + } +}; + + #endif /* _SRC__DRIVERS__PLATFORM__INTEL__INVALIDATOR_H_ */ diff --git a/repos/pc/src/driver/platform/pc/intel/io_mmu.cc b/repos/pc/src/driver/platform/pc/intel/io_mmu.cc index 4033f17459..4f8b990e41 100644 --- a/repos/pc/src/driver/platform/pc/intel/io_mmu.cc +++ b/repos/pc/src/driver/platform/pc/intel/io_mmu.cc @@ -138,7 +138,10 @@ void Intel::Io_mmu::flush_write_buffer() Intel::Invalidator & Intel::Io_mmu::invalidator() { - return *_register_invalidator; + if (!read()) + return *_register_invalidator; + else + return *_queued_invalidator; } @@ -154,6 +157,12 @@ void Intel::Io_mmu::_handle_faults() if (read()) error("Invalidation queue error"); + if (read()) + error("Invalidation completion error"); + + if (read()) + error("Invalidation time-out error"); + /* acknowledge all faults */ write(0x7d); @@ -336,6 +345,12 @@ void Intel::Io_mmu::resume() if (read() && read()) _global_command(false); + if (read()) { + /* enable queued invalidation if supported */ + _queued_invalidator.construct(_env, base() + 0x80); + _global_command(true); + } + /* restore fault events only if kernel did not enable IRQ remapping */ if (!read()) { write(_s3_fec); @@ -386,12 +401,14 @@ Intel::Io_mmu::Io_mmu(Env & env, /* disable queued invalidation interface */ if (read()) _global_command(false); + } - if (read() > 5) { - error("Register-based invalidation only ", - "supported in architecture versions 5 and lower"); - } - + if (read()) { + /* enable queued invalidation if supported */ + _queued_invalidator.construct(_env, base() + 0x80); + _global_command(true); + } else { + /* use register-based invalidation interface as fallback */ addr_t context_reg_base = base() + 0x28; addr_t iotlb_reg_base = base() + 8*_offset(); _register_invalidator.construct(context_reg_base, iotlb_reg_base, _verbose); diff --git 
a/repos/pc/src/driver/platform/pc/intel/io_mmu.h b/repos/pc/src/driver/platform/pc/intel/io_mmu.h index a2da53ee19..a920db504f 100644 --- a/repos/pc/src/driver/platform/pc/intel/io_mmu.h +++ b/repos/pc/src/driver/platform/pc/intel/io_mmu.h @@ -195,6 +195,7 @@ class Intel::Io_mmu : private Attached_mmio<0x800>, _env.ep(), *this, &Io_mmu::_handle_faults }; Constructible<Register_invalidator> _register_invalidator { }; + Constructible<Queued_invalidator> _queued_invalidator { }; /** * Registers @@ -243,6 +244,9 @@ class Intel::Io_mmu : private Attached_mmio<0x800>, /* interrupt remapping support */ struct Ir : Bitfield<3,1> { }; + /* queued-invalidation support */ + struct Qi : Bitfield<1,1> { }; + struct Page_walk_coherency : Bitfield<0,1> { }; }; @@ -297,6 +301,12 @@ class Intel::Io_mmu : private Attached_mmio<0x800>, /* fault record index */ struct Fri : Bitfield<8,8> { }; + /* invalidation time-out error */ + struct Ite : Bitfield<6,1> { }; + + /* invalidation completion error */ + struct Ice : Bitfield<5,1> { }; + /* invalidation queue error */ struct Iqe : Bitfield<4,1> { };