[dma] Modify DMA API to simplify calculation of medial addresses

Redefine the value stored within a DMA mapping to be the offset
between physical addresses and DMA addresses within the mapped region.

Provide a dma() wrapper function to calculate the DMA address for any
pointer within a mapped region, thereby simplifying the use cases when
a device needs to be given addresses other than the region start
address.

On a platform using the "flat" DMA implementation the DMA offset for
any mapped region is always zero, with the result that dma_map() can
be optimised away completely and dma() reduces to a straightforward
call to virt_to_phys().

Signed-off-by: Michael Brown <mcb30@ipxe.org>
pull/181/head
Michael Brown 2020-11-25 15:52:00 +00:00
parent 24ef743778
commit cf12a41703
7 changed files with 100 additions and 58 deletions

View File

@ -46,6 +46,7 @@ PROVIDE_DMAAPI_INLINE ( flat, dma_unmap );
PROVIDE_DMAAPI_INLINE ( flat, dma_alloc );
PROVIDE_DMAAPI_INLINE ( flat, dma_free );
PROVIDE_DMAAPI_INLINE ( flat, dma_set_mask );
PROVIDE_DMAAPI_INLINE ( flat, dma_phys );
/******************************************************************************
*
@ -138,6 +139,7 @@ PROVIDE_DMAAPI ( op, dma_unmap, dma_op_unmap );
PROVIDE_DMAAPI ( op, dma_alloc, dma_op_alloc );
PROVIDE_DMAAPI ( op, dma_free, dma_op_free );
PROVIDE_DMAAPI ( op, dma_set_mask, dma_op_set_mask );
PROVIDE_DMAAPI_INLINE ( op, dma_phys );
/******************************************************************************
*

View File

@ -513,7 +513,7 @@ int intel_create_ring ( struct intel_nic *intel, struct intel_ring *ring ) {
memset ( ring->desc, 0, ring->len );
/* Program ring address */
address = ring->map.addr;
address = dma ( &ring->map, ring->desc );
writel ( ( address & 0xffffffffUL ),
( intel->regs + ring->reg + INTEL_xDBAL ) );
if ( sizeof ( physaddr_t ) > sizeof ( uint32_t ) ) {
@ -571,7 +571,6 @@ void intel_refill_rx ( struct intel_nic *intel ) {
struct dma_mapping *map;
unsigned int rx_idx;
unsigned int rx_tail;
physaddr_t address;
unsigned int refilled = 0;
/* Refill ring */
@ -596,8 +595,7 @@ void intel_refill_rx ( struct intel_nic *intel ) {
intel->rx.ring.prod++;
/* Populate receive descriptor */
address = map->addr;
intel->rx.ring.describe ( rx, address, 0 );
intel->rx.ring.describe ( rx, dma ( map, iobuf->data ), 0 );
DBGC2 ( intel, "INTEL %p RX %d is [%lx,%lx)\n",
intel, rx_idx, virt_to_phys ( iobuf->data ),
@ -762,7 +760,6 @@ int intel_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) {
struct dma_mapping *map;
unsigned int tx_idx;
unsigned int tx_tail;
physaddr_t address;
size_t len;
int rc;
@ -783,9 +780,8 @@ int intel_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) {
intel->tx.ring.prod++;
/* Populate transmit descriptor */
address = map->addr;
len = iob_len ( iobuf );
intel->tx.ring.describe ( tx, address, len );
intel->tx.ring.describe ( tx, dma ( map, iobuf->data ), len );
wmb();
/* Notify card that there are packets ready to transmit */

View File

@ -152,7 +152,8 @@ int intelxl_msix_enable ( struct intelxl_nic *intelxl,
}
/* Configure interrupt zero to write to dummy location */
pci_msix_map ( &intelxl->msix.cap, 0, intelxl->msix.map.addr, 0 );
pci_msix_map ( &intelxl->msix.cap, 0,
dma ( &intelxl->msix.map, &intelxl->msix.msg ), 0 );
/* Enable dummy interrupt zero */
pci_msix_unmask ( &intelxl->msix.cap, 0 );
@ -230,22 +231,6 @@ static int intelxl_alloc_admin ( struct intelxl_nic *intelxl,
return 0;
}
/**
 * Get DMA address for admin descriptor or buffer entry
 *
 * @v admin		Admin queue
 * @v addr		Virtual address
 * @ret addr		DMA address
 */
static physaddr_t intelxl_admin_address ( struct intelxl_admin *admin,
					  void *addr ) {
	void *base = ( ( void * ) admin->buf );

	/* DMA address is the mapped region's base address plus the
	 * offset of this entry within the buffer area.
	 */
	return ( admin->map.addr + ( addr - base ) );
}
/**
* Enable admin queue
*
@ -270,7 +255,7 @@ static void intelxl_enable_admin ( struct intelxl_nic *intelxl,
admin->index = 0;
/* Program queue address */
address = intelxl_admin_address ( admin, admin->desc );
address = dma ( &admin->map, admin->desc );
writel ( ( address & 0xffffffffUL ), admin_regs + regs->bal );
if ( sizeof ( physaddr_t ) > sizeof ( uint32_t ) ) {
writel ( ( ( ( uint64_t ) address ) >> 32 ),
@ -365,7 +350,7 @@ static void intelxl_admin_event_init ( struct intelxl_nic *intelxl,
/* Initialise descriptor */
evt = &admin->desc[ index % INTELXL_ADMIN_NUM_DESC ];
buf = &admin->buf[ index % INTELXL_ADMIN_NUM_DESC ];
address = intelxl_admin_address ( admin, buf );
address = dma ( &admin->map, buf );
evt->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_BUF );
evt->len = cpu_to_le16 ( sizeof ( *buf ) );
evt->params.buffer.high = cpu_to_le32 ( address >> 32 );
@ -410,7 +395,7 @@ int intelxl_admin_command ( struct intelxl_nic *intelxl ) {
/* Populate data buffer address if applicable */
if ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_BUF ) ) {
address = intelxl_admin_address ( admin, buf );
address = dma ( &admin->map, buf );
cmd->params.buffer.high = cpu_to_le32 ( address >> 32 );
cmd->params.buffer.low = cpu_to_le32 ( address & 0xffffffffUL );
}
@ -1267,6 +1252,7 @@ static int intelxl_disable_ring ( struct intelxl_nic *intelxl,
*/
static int intelxl_create_ring ( struct intelxl_nic *intelxl,
struct intelxl_ring *ring ) {
physaddr_t address;
int rc;
/* Allocate descriptor ring */
@ -1274,7 +1260,8 @@ static int intelxl_create_ring ( struct intelxl_nic *intelxl,
goto err_alloc;
/* Program queue context */
if ( ( rc = ring->context ( intelxl, ring->map.addr ) ) != 0 )
address = dma ( &ring->map, ring->desc.raw );
if ( ( rc = ring->context ( intelxl, address ) ) != 0 )
goto err_context;
/* Enable ring */
@ -1346,7 +1333,7 @@ static void intelxl_refill_rx ( struct intelxl_nic *intelxl ) {
intelxl->rx.ring.prod++;
/* Populate receive descriptor */
rx->address = cpu_to_le64 ( map->addr );
rx->address = cpu_to_le64 ( dma ( map, iobuf->data ) );
rx->flags = 0;
DBGC2 ( intelxl, "INTELXL %p RX %d is [%08lx,%08lx)\n",
@ -1537,7 +1524,7 @@ int intelxl_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) {
/* Populate transmit descriptor */
len = iob_len ( iobuf );
tx->address = cpu_to_le64 ( map->addr );
tx->address = cpu_to_le64 ( dma ( map, iobuf->data ) );
tx->len = cpu_to_le32 ( INTELXL_TX_DATA_LEN ( len ) );
tx->flags = cpu_to_le32 ( INTELXL_TX_DATA_DTYP | INTELXL_TX_DATA_EOP |
INTELXL_TX_DATA_RS | INTELXL_TX_DATA_JFDI );

View File

@ -380,12 +380,14 @@ static int intelxlvf_admin_configure ( struct net_device *netdev ) {
buf->cfg.count = cpu_to_le16 ( 1 );
buf->cfg.tx.vsi = cpu_to_le16 ( intelxl->vsi );
buf->cfg.tx.count = cpu_to_le16 ( INTELXL_TX_NUM_DESC );
buf->cfg.tx.base = cpu_to_le64 ( intelxl->tx.ring.map.addr );
buf->cfg.tx.base = cpu_to_le64 ( dma ( &intelxl->tx.ring.map,
intelxl->tx.ring.desc.raw ) );
buf->cfg.rx.vsi = cpu_to_le16 ( intelxl->vsi );
buf->cfg.rx.count = cpu_to_le32 ( INTELXL_RX_NUM_DESC );
buf->cfg.rx.len = cpu_to_le32 ( intelxl->mfs );
buf->cfg.rx.mfs = cpu_to_le32 ( intelxl->mfs );
buf->cfg.rx.base = cpu_to_le64 ( intelxl->rx.ring.map.addr );
buf->cfg.rx.base = cpu_to_le64 ( dma ( &intelxl->rx.ring.map,
intelxl->rx.ring.desc.raw ) );
/* Issue command */
if ( ( rc = intelxlvf_admin_command ( netdev ) ) != 0 )

View File

@ -506,6 +506,7 @@ static void realtek_check_link ( struct net_device *netdev ) {
* @ret rc Return status code
*/
static int realtek_create_buffer ( struct realtek_nic *rtl ) {
struct realtek_rx_buffer *rxbuf = &rtl->rxbuf;
size_t len = ( RTL_RXBUF_LEN + RTL_RXBUF_PAD );
/* Do nothing unless in legacy mode */
@ -513,17 +514,16 @@ static int realtek_create_buffer ( struct realtek_nic *rtl ) {
return 0;
/* Allocate buffer */
rtl->rxbuf.data = dma_alloc ( rtl->dma, len, RTL_RXBUF_ALIGN,
&rtl->rxbuf.map );
if ( ! rtl->rxbuf.data )
rxbuf->data = dma_alloc ( rtl->dma, len, RTL_RXBUF_ALIGN, &rxbuf->map );
if ( ! rxbuf->data )
return -ENOMEM;
/* Program buffer address */
writel ( rtl->rxbuf.map.addr, rtl->regs + RTL_RBSTART );
writel ( dma ( &rxbuf->map, rxbuf->data ), rtl->regs + RTL_RBSTART );
DBGC ( rtl, "REALTEK %p receive buffer is at [%08lx,%08lx,%08lx)\n",
rtl, virt_to_phys ( rtl->rxbuf.data ),
( virt_to_phys ( rtl->rxbuf.data ) + RTL_RXBUF_LEN ),
( virt_to_phys ( rtl->rxbuf.data ) + len ) );
rtl, virt_to_phys ( rxbuf->data ),
( virt_to_phys ( rxbuf->data ) + RTL_RXBUF_LEN ),
( virt_to_phys ( rxbuf->data ) + len ) );
return 0;
}
@ -534,6 +534,7 @@ static int realtek_create_buffer ( struct realtek_nic *rtl ) {
* @v rtl Realtek device
*/
static void realtek_destroy_buffer ( struct realtek_nic *rtl ) {
struct realtek_rx_buffer *rxbuf = &rtl->rxbuf;
size_t len = ( RTL_RXBUF_LEN + RTL_RXBUF_PAD );
/* Do nothing unless in legacy mode */
@ -544,9 +545,9 @@ static void realtek_destroy_buffer ( struct realtek_nic *rtl ) {
writel ( 0, rtl->regs + RTL_RBSTART );
/* Free buffer */
dma_free ( rtl->dma, rtl->rxbuf.data, len, &rtl->rxbuf.map );
rtl->rxbuf.data = NULL;
rtl->rxbuf.offset = 0;
dma_free ( rtl->dma, rxbuf->data, len, &rxbuf->map );
rxbuf->data = NULL;
rxbuf->offset = 0;
}
/**
@ -574,7 +575,7 @@ static int realtek_create_ring ( struct realtek_nic *rtl,
memset ( ring->desc, 0, ring->len );
/* Program ring address */
address = ring->map.addr;
address = dma ( &ring->map, ring->desc );
writel ( ( ( ( uint64_t ) address ) >> 32 ),
rtl->regs + ring->reg + 4 );
writel ( ( address & 0xffffffffUL ), rtl->regs + ring->reg );
@ -648,7 +649,7 @@ static void realtek_refill_rx ( struct realtek_nic *rtl ) {
rtl->rx.ring.prod++;
/* Populate receive descriptor */
rx->address = cpu_to_le64 ( map->addr );
rx->address = cpu_to_le64 ( dma ( map, iobuf->data ) );
rx->length = cpu_to_le16 ( RTL_RX_MAX_LEN );
wmb();
rx->flags = ( cpu_to_le16 ( RTL_DESC_OWN ) |
@ -797,7 +798,7 @@ static int realtek_transmit ( struct net_device *netdev,
/* Map I/O buffer */
if ( ( rc = dma_map_tx_iob ( rtl->dma, iobuf, map ) ) != 0 )
return rc;
address = map->addr;
address = dma ( map, iobuf->data );
/* Update producer index */
rtl->tx.ring.prod++;

View File

@ -30,8 +30,13 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
/** A DMA mapping */
struct dma_mapping {
/** Device-side address */
physaddr_t addr;
/** Address offset
*
* This is the value that must be added to a physical address
* within the mapping in order to produce the corresponding
* device-side DMA address.
*/
physaddr_t offset;
/** Platform mapping token */
void *token;
};
@ -148,12 +153,10 @@ struct dma_operations {
* @ret rc Return status code
*/
static inline __always_inline int
DMAAPI_INLINE ( flat, dma_map ) ( struct dma_device *dma, physaddr_t addr,
DMAAPI_INLINE ( flat, dma_map ) ( struct dma_device *dma,
physaddr_t addr __unused,
size_t len __unused, int flags __unused,
struct dma_mapping *map ) {
/* Use physical address as device address */
map->addr = addr;
struct dma_mapping *map __unused ) {
/* Increment mapping count (for debugging) */
if ( DBG_LOG )
@ -187,13 +190,13 @@ DMAAPI_INLINE ( flat, dma_unmap ) ( struct dma_device *dma,
* @ret addr Buffer address, or NULL on error
*/
static inline __always_inline void *
DMAAPI_INLINE ( flat, dma_alloc ) ( struct dma_device *dma, size_t len,
size_t align, struct dma_mapping *map ) {
DMAAPI_INLINE ( flat, dma_alloc ) ( struct dma_device *dma,
size_t len, size_t align,
struct dma_mapping *map __unused ) {
void *addr;
/* Allocate buffer */
addr = malloc_phys ( len, align );
map->addr = virt_to_phys ( addr );
/* Increment allocation count (for debugging) */
if ( DBG_LOG && addr )
@ -236,6 +239,35 @@ DMAAPI_INLINE ( flat, dma_set_mask ) ( struct dma_device *dma __unused,
/* Nothing to do */
}
/**
 * Get DMA address from physical address
 *
 * @v map		DMA mapping
 * @v addr		Physical address within the mapped region
 * @ret addr		Device-side DMA address
 *
 * In the flat DMA implementation, device-side addresses are identical
 * to physical addresses, so this reduces to the identity function and
 * can be optimised away entirely by the compiler.
 */
static inline __always_inline physaddr_t
DMAAPI_INLINE ( flat, dma_phys ) ( struct dma_mapping *map __unused,
				   physaddr_t addr ) {
	/* Use physical address as device address */
	return addr;
}
/**
 * Get DMA address from physical address
 *
 * @v map		DMA mapping
 * @v addr		Physical address within the mapped region
 * @ret addr		Device-side DMA address
 */
static inline __always_inline physaddr_t
DMAAPI_INLINE ( op, dma_phys ) ( struct dma_mapping *map, physaddr_t addr ) {
	/* Apply the mapping's stored physical-to-DMA offset */
	return ( map->offset + addr );
}
/**
* Map buffer for DMA
*
@ -288,6 +320,28 @@ void dma_free ( struct dma_device *dma, void *addr, size_t len,
*/
void dma_set_mask ( struct dma_device *dma, physaddr_t mask );
/**
 * Get DMA address from physical address
 *
 * @v map		DMA mapping
 * @v addr		Physical address within the mapped region
 * @ret addr		Device-side DMA address
 */
physaddr_t dma_phys ( struct dma_mapping *map, physaddr_t addr );

/**
 * Get DMA address from virtual address
 *
 * @v map		DMA mapping
 * @v addr		Virtual address within the mapped region
 * @ret addr		Device-side DMA address
 */
static inline __always_inline physaddr_t dma ( struct dma_mapping *map,
					       void *addr ) {
	/* Convert to a physical address, then translate via the mapping */
	physaddr_t phys = virt_to_phys ( addr );

	return dma_phys ( map, phys );
}
/**
* Initialise DMA device
*

View File

@ -335,7 +335,7 @@ static int efipci_dma_map ( struct dma_device *dma, physaddr_t addr, size_t len,
int rc;
/* Sanity check */
assert ( map->addr == 0 );
assert ( map->offset == 0 );
assert ( map->token == NULL );
/* Determine operation */
@ -374,7 +374,7 @@ static int efipci_dma_map ( struct dma_device *dma, physaddr_t addr, size_t len,
}
/* Populate mapping */
map->addr = bus;
map->offset = ( bus - addr );
map->token = mapping;
/* Increment mapping count (for debugging) */
@ -408,7 +408,7 @@ static void efipci_dma_unmap ( struct dma_device *dma,
pci_io->Unmap ( pci_io, map->token );
/* Clear mapping */
map->addr = 0;
map->offset = 0;
map->token = NULL;
/* Decrement mapping count (for debugging) */