mirror of https://github.com/ipxe/ipxe.git
[intel] Push new RX descriptors in batches
Inside a virtual machine, writing the RX ring tail pointer may incur a substantial overhead of processing inside the hypervisor. Minimise this overhead by writing the tail pointer once per batch of descriptors, rather than once per descriptor.

Profiling under qemu-kvm (version 1.6.2) shows that this reduces the amount of time taken to refill the RX descriptor ring by around 90%.

Signed-off-by: Michael Brown <mcb30@ipxe.org>
parent 8a3dcefc0c
commit b2c7b6a85e
@@ -456,19 +456,20 @@ void intel_refill_rx ( struct intel_nic *intel ) {
         unsigned int rx_idx;
         unsigned int rx_tail;
         physaddr_t address;
+        unsigned int refilled = 0;
 
         /* Refill ring */
         while ( ( intel->rx.prod - intel->rx.cons ) < INTEL_RX_FILL ) {
 
                 /* Allocate I/O buffer */
                 iobuf = alloc_iob ( INTEL_RX_MAX_LEN );
                 if ( ! iobuf ) {
                         /* Wait for next refill */
-                        return;
+                        break;
                 }
 
                 /* Get next receive descriptor */
                 rx_idx = ( intel->rx.prod++ % INTEL_NUM_RX_DESC );
-                rx_tail = ( intel->rx.prod % INTEL_NUM_RX_DESC );
                 rx = &intel->rx.desc[rx_idx];
 
                 /* Populate receive descriptor */
@@ -477,20 +478,24 @@ void intel_refill_rx ( struct intel_nic *intel ) {
                 rx->length = 0;
                 rx->status = 0;
                 rx->errors = 0;
-                wmb();
 
                 /* Record I/O buffer */
                 assert ( intel->rx_iobuf[rx_idx] == NULL );
                 intel->rx_iobuf[rx_idx] = iobuf;
 
-                /* Push descriptor to card */
-                profile_start ( &intel_vm_refill_profiler );
-                writel ( rx_tail, intel->regs + intel->rx.reg + INTEL_xDT );
-                profile_stop ( &intel_vm_refill_profiler );
-
                 DBGC2 ( intel, "INTEL %p RX %d is [%llx,%llx)\n", intel, rx_idx,
                         ( ( unsigned long long ) address ),
                         ( ( unsigned long long ) address + INTEL_RX_MAX_LEN ) );
+                refilled++;
+        }
+
+        /* Push descriptors to card, if applicable */
+        if ( refilled ) {
+                wmb();
+                rx_tail = ( intel->rx.prod % INTEL_NUM_RX_DESC );
+                profile_start ( &intel_vm_refill_profiler );
+                writel ( rx_tail, intel->regs + intel->rx.reg + INTEL_xDT );
+                profile_stop ( &intel_vm_refill_profiler );
+        }
 }
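For context, the saving comes purely from issuing fewer tail-pointer (doorbell) writes: the device only needs to see the final producer index, so the value written on every loop iteration carried no extra information. The standalone C sketch below is not iPXE code; the ring size, fill level, and the write_tail()/refill_*() names are invented for illustration. It simply counts how many tail writes each refill strategy performs, which is the operation that a hypervisor may have to trap and emulate once per write.

#include <stdio.h>

#define NUM_DESC 256    /* hypothetical ring size */
#define FILL 64         /* hypothetical fill target */

/* Counts stand-in "tail register" writes; in the real driver each
 * such write is a writel() to a device register.
 */
static unsigned int doorbell_writes;

static void write_tail ( unsigned int tail ) {
        ( void ) tail;
        doorbell_writes++;
}

/* Old behaviour: push the tail pointer once per descriptor */
static void refill_per_descriptor ( unsigned int *prod, unsigned int cons ) {
        while ( ( *prod - cons ) < FILL ) {
                (*prod)++;
                write_tail ( *prod % NUM_DESC );
        }
}

/* New behaviour: push the tail pointer once per batch (real driver
 * code additionally needs a write memory barrier before the single
 * tail write, as in the diff above).
 */
static void refill_batched ( unsigned int *prod, unsigned int cons ) {
        unsigned int refilled = 0;

        while ( ( *prod - cons ) < FILL ) {
                (*prod)++;
                refilled++;
        }
        if ( refilled )
                write_tail ( *prod % NUM_DESC );
}

int main ( void ) {
        unsigned int prod;

        prod = 0;
        doorbell_writes = 0;
        refill_per_descriptor ( &prod, 0 );
        printf ( "per-descriptor refill: %u tail writes\n", doorbell_writes );

        prod = 0;
        doorbell_writes = 0;
        refill_batched ( &prod, 0 );
        printf ( "batched refill: %u tail writes\n", doorbell_writes );

        return 0;
}

Compiled with any C compiler, this prints 64 tail writes for the per-descriptor strategy and 1 for the batched strategy. The real driver issues wmb() before its single writel() so that the populated descriptors are visible to the device before the tail pointer moves.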