dde_ipxe: fix allocation errors under high TX load

Under high TX load, 'irq_handler()' is rarely executed but packets
are still received since 'netdev_poll()' is called for every TX packet.

'netdev_poll()' not only handles completed transmissions but also
puts the IO buffers of received packets into the RX queue and refills the RX
ring by allocating more IO buffers. However, the IO buffers can only be freed
after they have been dequeued and passed to the 'rx_callback()'. Since
this was only done by 'irq_handler()', we exhaust the slab allocator
under high TX load.

Fix this by processing the RX queue not only in the IRQ handler
but also in the transmit path, whenever a packet is sent.

genodelabs/genode#4555
This commit is contained in:
Johannes Schlatow 2022-07-26 20:05:05 +02:00 committed by Christian Helmuth
parent e2b61231ae
commit a6a856cb70

View File

@ -174,6 +174,30 @@ static unsigned scan_pci(void)
return NO_DEVICE_FOUND;
}
/**
 * Helper for pulling packets from RX queue.
 *
 * Dequeues every pending IO buffer from the RX queue, passes its payload
 * to 'rx_callback' (if one is registered), and frees the buffer afterwards.
 * Freeing here is what keeps the slab allocator from being exhausted when
 * 'netdev_poll()' keeps refilling the RX ring (see commit message).
 *
 * Must be called within dde_lock. The lock is temporarily released around
 * the callback invocation — presumably so the callback may re-enter the
 * driver without deadlocking (TODO confirm against dde_lock semantics).
 *
 * \return number of packets actually delivered to 'rx_callback'
 *         (0 when no callback is registered or the queue was empty)
 */
int process_rx_data()
{
	struct io_buffer *iobuf;
	int received = 0;
	while ((iobuf = netdev_rx_dequeue(net_dev))) {
		/* drop the lock before calling out of the driver */
		dde_lock_leave();
		if (rx_callback) {
			/* NOTE(review): first argument looks like an interface
			 * index fixed to 1 — verify against the callback contract */
			rx_callback(1, iobuf->data, iob_len(iobuf));
			received++;
		}
		dde_lock_enter();
		/* dequeued buffers are owned by us — free unconditionally,
		 * even when no callback consumed the data */
		free_iob(iobuf);
	}
	return received;
}
/**
* IRQ handler registered at DDE
@ -190,17 +214,7 @@ static void irq_handler(void *p)
for (unsigned retry = 0; (retry < 2) && !processed_rx_data; retry++) {
/* poll the device for packets and also link-state changes */
netdev_poll(net_dev);
struct io_buffer *iobuf;
while ((iobuf = netdev_rx_dequeue(net_dev))) {
dde_lock_leave();
if (rx_callback) {
rx_callback(1, iobuf->data, iob_len(iobuf));
processed_rx_data = 1;
}
dde_lock_enter();
free_iob(iobuf);
}
processed_rx_data = process_rx_data();
}
dde_lock_leave();
@ -282,6 +296,7 @@ int dde_ipxe_nic_tx(unsigned if_index, const char *packet, unsigned packet_len)
netdev_poll(net_dev);
netdev_tx(net_dev, iob_disown(iobuf));
process_rx_data();
dde_lock_leave();
return 0;