mirror of
https://github.com/genodelabs/genode.git
synced 2025-04-07 11:27:29 +00:00
qemu-usb: access I/O buffers directly
This commit replaces the mapping of DMA buffers with direct access to the I/O buffers, getting rid of the bounce-buffer handling. The bounce buffers had been introduced to prevent data corruption observed when using USB storage with Windows 10 guests. However, because Windows 10 at times manages its DMA memory as many small pieces rather than a few larger ones, the unbounded registry of bounce buffers becomes a problem when it accumulates stale entries. Changing the 'qemu-usb' implementation allows 'read_dma' and 'write_dma' to be used directly. Fixes #5121.
This commit is contained in:
parent
cda48b4a49
commit
35638568c5
@ -68,8 +68,6 @@ namespace Qemu {
|
||||
virtual void raise_interrupt(int assert) = 0;
|
||||
virtual int read_dma(addr_t addr, void *buf, size_t size) = 0;
|
||||
virtual int write_dma(addr_t addr, void const *buf, size_t size) = 0;
|
||||
virtual void *map_dma(addr_t base, size_t size, Dma_direction dir) = 0;
|
||||
virtual void unmap_dma(void *addr, size_t size, Dma_direction dir) = 0;
|
||||
};
|
||||
|
||||
|
||||
|
@ -947,8 +947,10 @@ size_t iov_from_buf(const struct iovec *iov, unsigned int iov_cnt,
|
||||
unsigned int i;
|
||||
for (i = 0, done = 0; (offset || done < bytes) && i < iov_cnt; i++) {
|
||||
if (offset < iov[i].iov_len) {
|
||||
size_t len = Genode::min(iov[i].iov_len - offset, bytes - done);
|
||||
memcpy((char*)iov[i].iov_base + offset, (char*)buf + done, len);
|
||||
size_t const len = Genode::min(iov[i].iov_len - offset, bytes - done);
|
||||
Qemu::addr_t const dma_addr = (Qemu::addr_t)((char *)iov[i].iov_base + offset);
|
||||
|
||||
_pci_device->write_dma(dma_addr, (char*)buf + done, len);
|
||||
done += len;
|
||||
offset = 0;
|
||||
} else {
|
||||
@ -969,8 +971,10 @@ size_t iov_to_buf(const struct iovec *iov, const unsigned int iov_cnt,
|
||||
unsigned int i;
|
||||
for (i = 0, done = 0; (offset || done < bytes) && i < iov_cnt; i++) {
|
||||
if (offset < iov[i].iov_len) {
|
||||
size_t len = Genode::min(iov[i].iov_len - offset, bytes - done);
|
||||
memcpy((char*)buf + done, (char*)iov[i].iov_base + offset, len);
|
||||
size_t const len = Genode::min(iov[i].iov_len - offset, bytes - done);
|
||||
Qemu::addr_t const dma_addr = (Qemu::addr_t)((char *)iov[i].iov_base + offset);
|
||||
|
||||
_pci_device->read_dma(dma_addr, (char*)buf + done, len);
|
||||
done += len;
|
||||
offset = 0;
|
||||
} else {
|
||||
@ -997,55 +1001,23 @@ void qemu_sglist_destroy(QEMUSGList *sgl) {
|
||||
|
||||
int usb_packet_map(USBPacket *p, QEMUSGList *sgl)
|
||||
{
|
||||
Qemu::Pci_device::Dma_direction dir =
|
||||
(p->pid == USB_TOKEN_IN) ? Qemu::Pci_device::Dma_direction::IN
|
||||
: Qemu::Pci_device::Dma_direction::OUT;
|
||||
|
||||
void *mem;
|
||||
|
||||
/*
|
||||
* We add the SGL entries themself to be able to call 'read_dma'
|
||||
* and 'write_dma' directly (and to satisfy asserts in the contrib
|
||||
* code).
|
||||
*/
|
||||
for (int i = 0; i < sgl->niov; i++) {
|
||||
dma_addr_t base = (dma_addr_t) sgl->iov[i].iov_base;
|
||||
dma_addr_t len = sgl->iov[i].iov_len;
|
||||
dma_addr_t const base = (dma_addr_t) sgl->iov[i].iov_base;
|
||||
dma_addr_t const len = sgl->iov[i].iov_len;
|
||||
|
||||
while (len) {
|
||||
dma_addr_t xlen = len;
|
||||
mem = _pci_device->map_dma(base, xlen, dir);
|
||||
if (verbose_iov)
|
||||
Genode::log("mem: ", mem, " base: ", (void *)base, " len: ",
|
||||
Genode::Hex(len));
|
||||
|
||||
if (!mem) {
|
||||
goto err;
|
||||
}
|
||||
if (xlen > len) {
|
||||
xlen = len;
|
||||
}
|
||||
qemu_iovec_add(&p->iov, mem, xlen);
|
||||
len -= xlen;
|
||||
base += xlen;
|
||||
}
|
||||
qemu_iovec_add(&p->iov, (void*)base, len);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err:
|
||||
Genode::error("could not map dma");
|
||||
usb_packet_unmap(p, sgl);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
void usb_packet_unmap(USBPacket *p, QEMUSGList *sgl)
|
||||
{
|
||||
Qemu::Pci_device::Dma_direction dir =
|
||||
(p->pid == USB_TOKEN_IN) ? Qemu::Pci_device::Dma_direction::IN
|
||||
: Qemu::Pci_device::Dma_direction::OUT;
|
||||
|
||||
for (int i = 0; i < p->iov.niov; i++) {
|
||||
_pci_device->unmap_dma(p->iov.iov[i].iov_base,
|
||||
p->iov.iov[i].iov_len,
|
||||
dir);
|
||||
}
|
||||
}
|
||||
void usb_packet_unmap(USBPacket *p, QEMUSGList *sgl) { }
|
||||
|
||||
|
||||
/******************
|
||||
|
@ -291,33 +291,8 @@ struct Timer_queue : public Qemu::Timer_queue
|
||||
|
||||
struct Pci_device : public Qemu::Pci_device
|
||||
{
|
||||
Libc::Allocator _alloc { };
|
||||
|
||||
PPDMDEVINS pci_dev;
|
||||
|
||||
struct Dma_bounce_buffer
|
||||
{
|
||||
Genode::Allocator &_alloc;
|
||||
|
||||
Qemu::addr_t const base;
|
||||
Qemu::size_t const size;
|
||||
void * const addr { _alloc.alloc(size) };
|
||||
|
||||
Dma_bounce_buffer(Genode::Allocator &alloc,
|
||||
Qemu::addr_t base,
|
||||
Qemu::size_t size)
|
||||
: _alloc { alloc }, base { base }, size { size }
|
||||
{ }
|
||||
|
||||
virtual ~Dma_bounce_buffer()
|
||||
{
|
||||
_alloc.free(addr, size);
|
||||
}
|
||||
};
|
||||
|
||||
using Reg_dma_buffer = Genode::Registered<Dma_bounce_buffer>;
|
||||
Genode::Registry<Reg_dma_buffer> _dma_buffers { };
|
||||
|
||||
Pci_device(PPDMDEVINS pDevIns) : pci_dev(pDevIns) { }
|
||||
|
||||
void raise_interrupt(int level) override {
|
||||
@ -328,44 +303,6 @@ struct Pci_device : public Qemu::Pci_device
|
||||
|
||||
int write_dma(Qemu::addr_t addr, void const *buf, Qemu::size_t size) override {
|
||||
return PDMDevHlpPhysWrite(pci_dev, addr, buf, size); }
|
||||
|
||||
void *map_dma(Qemu::addr_t base, Qemu::size_t size,
|
||||
Qemu::Pci_device::Dma_direction dir) override
|
||||
{
|
||||
Reg_dma_buffer *dma = nullptr;
|
||||
|
||||
try {
|
||||
dma = new (_alloc) Reg_dma_buffer(_dma_buffers,
|
||||
_alloc, base, size);
|
||||
} catch (...) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
/* copy data for write request to bounce buffer */
|
||||
if (dir == Qemu::Pci_device::Dma_direction::OUT) {
|
||||
(void)PDMDevHlpPhysRead(pci_dev, base, dma->addr, size);
|
||||
}
|
||||
|
||||
return dma->addr;
|
||||
}
|
||||
|
||||
void unmap_dma(void *addr, Qemu::size_t size,
|
||||
Qemu::Pci_device::Dma_direction dir) override
|
||||
{
|
||||
_dma_buffers.for_each([&] (Reg_dma_buffer &dma) {
|
||||
if (dma.addr != addr) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* copy data for read request from bounce buffer */
|
||||
if (dir == Qemu::Pci_device::Dma_direction::IN) {
|
||||
(void)PDMDevHlpPhysWrite(pci_dev,
|
||||
dma.base, dma.addr, dma.size);
|
||||
}
|
||||
|
||||
Genode::destroy(_alloc, &dma);
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
|
@ -299,99 +299,10 @@ struct Timer_queue : public Qemu::Timer_queue
|
||||
|
||||
struct Pci_device : public Qemu::Pci_device
|
||||
{
|
||||
Genode::Allocator &_alloc;
|
||||
Genode::Allocator_avl _dma_alloc;
|
||||
|
||||
PPDMDEVINS pci_dev;
|
||||
|
||||
struct Bounce_buffer
|
||||
{
|
||||
Genode::Allocator &_alloc;
|
||||
Genode::Allocator_avl &_range_alloc;
|
||||
|
||||
struct Dma
|
||||
{
|
||||
void * addr;
|
||||
|
||||
Qemu::addr_t base;
|
||||
Qemu::size_t size;
|
||||
};
|
||||
|
||||
Dma _dma { 0, 0 };
|
||||
|
||||
enum : uint32_t { DMA_BUFFER_CHUNK = 4u << 20, };
|
||||
|
||||
static bool _increase_dma_alloc(Genode::Allocator &alloc,
|
||||
Genode::Allocator_avl &range_alloc)
|
||||
{
|
||||
void const * const result =
|
||||
alloc.try_alloc(DMA_BUFFER_CHUNK).convert<void*>(
|
||||
[&] (void *ptr) {
|
||||
range_alloc.add_range((Genode::addr_t)ptr, DMA_BUFFER_CHUNK);
|
||||
return ptr;
|
||||
},
|
||||
[&] (Genode::Allocator::Alloc_error) -> void * {
|
||||
return nullptr; });
|
||||
return !!result;
|
||||
}
|
||||
|
||||
Bounce_buffer(Genode::Allocator &alloc,
|
||||
Genode::Allocator_avl &range_alloc)
|
||||
: _alloc { alloc }, _range_alloc { range_alloc } { }
|
||||
|
||||
bool alloc_dma(Qemu::addr_t base, Qemu::size_t size)
|
||||
{
|
||||
/* treat too large allocations as error for now */
|
||||
if (size > DMA_BUFFER_CHUNK) {
|
||||
Genode::error(__func__, ": denying allocation, size: ",
|
||||
size, " larger than chunk: ", (unsigned)DMA_BUFFER_CHUNK);
|
||||
return false;
|
||||
}
|
||||
|
||||
void *dma_buffer = nullptr;
|
||||
for (int retry = 0; retry < 1; retry++) {
|
||||
dma_buffer = _range_alloc.alloc_aligned(size, 12).convert<void*>(
|
||||
[&] (void *ptr) { return ptr; },
|
||||
[&] (Genode::Allocator::Alloc_error) -> void *{
|
||||
(void)_increase_dma_alloc(_alloc, _range_alloc);
|
||||
return nullptr;
|
||||
});
|
||||
|
||||
if (dma_buffer)
|
||||
break;
|
||||
}
|
||||
if (!dma_buffer)
|
||||
return false;
|
||||
|
||||
_dma = Dma { dma_buffer, base, size };
|
||||
return true;
|
||||
}
|
||||
|
||||
void free_dma()
|
||||
{
|
||||
_range_alloc.free(_dma.addr, _dma.size);
|
||||
_dma = Dma { nullptr, 0, 0 };
|
||||
}
|
||||
|
||||
bool used() const
|
||||
{
|
||||
return _dma.base != 0;
|
||||
}
|
||||
|
||||
virtual ~Bounce_buffer() { }
|
||||
};
|
||||
|
||||
using Reg_bounce_buffer = Genode::Registered<Bounce_buffer>;
|
||||
Genode::Registry<Reg_bounce_buffer> _bounce_buffers { };
|
||||
|
||||
Pci_device(Genode::Allocator &alloc, PPDMDEVINS pDevIns)
|
||||
:
|
||||
_alloc { alloc }, _dma_alloc { &_alloc }, pci_dev { pDevIns }
|
||||
{
|
||||
/* show problem early */
|
||||
if (!Bounce_buffer::_increase_dma_alloc(_alloc, _dma_alloc))
|
||||
Genode::error("could not allocate USB DMA buffer memory");
|
||||
}
|
||||
: pci_dev { pDevIns } { }
|
||||
|
||||
void raise_interrupt(int level) override {
|
||||
PDMDevHlpPCISetIrqNoWait(pci_dev, 0, level); }
|
||||
@ -401,59 +312,6 @@ struct Pci_device : public Qemu::Pci_device
|
||||
|
||||
int write_dma(Qemu::addr_t addr, void const *buf, Qemu::size_t size) override {
|
||||
return PDMDevHlpPhysWrite(pci_dev, addr, buf, size); }
|
||||
|
||||
void *map_dma(Qemu::addr_t base, Qemu::size_t size,
|
||||
Qemu::Pci_device::Dma_direction dir) override
|
||||
{
|
||||
Reg_bounce_buffer *bb = nullptr;
|
||||
_bounce_buffers.for_each([&] (Reg_bounce_buffer ®_bb) {
|
||||
if (bb)
|
||||
return;
|
||||
|
||||
if (!reg_bb.used())
|
||||
bb = ®_bb;
|
||||
});
|
||||
|
||||
if (!bb)
|
||||
try {
|
||||
bb = new (_alloc) Reg_bounce_buffer(_bounce_buffers,
|
||||
_alloc,
|
||||
_dma_alloc);
|
||||
} catch (...) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
if (!bb->alloc_dma(base, size))
|
||||
return nullptr;
|
||||
|
||||
/* copy data for write request to bounce buffer */
|
||||
if (dir == Qemu::Pci_device::Dma_direction::OUT) {
|
||||
(void)PDMDevHlpPhysRead(pci_dev, bb->_dma.base,
|
||||
bb->_dma.addr, bb->_dma.size);
|
||||
}
|
||||
|
||||
return bb->_dma.addr;
|
||||
}
|
||||
|
||||
void unmap_dma(void *addr, Qemu::size_t size,
|
||||
Qemu::Pci_device::Dma_direction dir) override
|
||||
{
|
||||
_bounce_buffers.for_each([&] (Reg_bounce_buffer ®_bb) {
|
||||
if (reg_bb._dma.addr != addr) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* copy data for read request from bounce buffer */
|
||||
if (dir == Qemu::Pci_device::Dma_direction::IN) {
|
||||
(void)PDMDevHlpPhysWrite(pci_dev,
|
||||
reg_bb._dma.base,
|
||||
reg_bb._dma.addr,
|
||||
reg_bb._dma.size);
|
||||
}
|
||||
|
||||
reg_bb.free_dma();
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user