mirror of https://github.com/ipxe/ipxe.git
[dma] Record DMA device as part of DMA mapping if needed
Allow for dma_unmap() to be called by code other than the DMA device
driver itself.

Signed-off-by: Michael Brown <mcb30@ipxe.org>
parent cf12a41703
commit 70e6e83243
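In summary: every mapping call now takes the struct dma_mapping pointer
immediately after the struct dma_device pointer, and the mapping records
the device that created it, so that unmapping and freeing need only the
mapping itself. The driver-facing prototypes change as follows (collected
from the diff below; the "was" forms are the pre-commit signatures):

int dma_map ( struct dma_device *dma, struct dma_mapping *map,
              physaddr_t addr, size_t len, int flags );
                          /* was ( dma, addr, len, flags, map ) */
void dma_unmap ( struct dma_mapping *map );
                          /* was ( dma, map ) */
void * dma_alloc ( struct dma_device *dma, struct dma_mapping *map,
                   size_t len, size_t align );
                          /* was ( dma, len, align, map ) */
void dma_free ( struct dma_mapping *map, void *addr, size_t len );
                          /* was ( dma, addr, len, map ) */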
--- a/src/core/dma.c
+++ b/src/core/dma.c
@@ -59,66 +59,65 @@ PROVIDE_DMAAPI_INLINE ( flat, dma_phys );
  * Map buffer for DMA
  *
  * @v dma		DMA device
+ * @v map		DMA mapping to fill in
  * @v addr		Buffer address
  * @v len		Length of buffer
  * @v flags		Mapping flags
- * @v map		DMA mapping to fill in
  * @ret rc		Return status code
  */
-static int dma_op_map ( struct dma_device *dma, physaddr_t addr, size_t len,
-			int flags, struct dma_mapping *map ) {
+static int dma_op_map ( struct dma_device *dma, struct dma_mapping *map,
+			physaddr_t addr, size_t len, int flags ) {
 	struct dma_operations *op = dma->op;
 
 	if ( ! op )
 		return -ENODEV;
-	return op->map ( dma, addr, len, flags, map );
+	return op->map ( dma, map, addr, len, flags );
 }
 
 /**
  * Unmap buffer
  *
- * @v dma		DMA device
  * @v map		DMA mapping
  */
-static void dma_op_unmap ( struct dma_device *dma, struct dma_mapping *map ) {
-	struct dma_operations *op = dma->op;
+static void dma_op_unmap ( struct dma_mapping *map ) {
+	struct dma_device *dma = map->dma;
 
-	assert ( op != NULL );
-	op->unmap ( dma, map );
+	assert ( dma != NULL );
+	assert ( dma->op != NULL );
+	dma->op->unmap ( dma, map );
 }
 
 /**
  * Allocate and map DMA-coherent buffer
  *
  * @v dma		DMA device
+ * @v map		DMA mapping to fill in
  * @v len		Length of buffer
  * @v align		Physical alignment
- * @v map		DMA mapping to fill in
  * @ret addr		Buffer address, or NULL on error
  */
-static void * dma_op_alloc ( struct dma_device *dma, size_t len, size_t align,
-			     struct dma_mapping *map ) {
+static void * dma_op_alloc ( struct dma_device *dma, struct dma_mapping *map,
+			     size_t len, size_t align ) {
 	struct dma_operations *op = dma->op;
 
 	if ( ! op )
 		return NULL;
-	return op->alloc ( dma, len, align, map );
+	return op->alloc ( dma, map, len, align );
 }
 
 /**
  * Unmap and free DMA-coherent buffer
  *
- * @v dma		DMA device
+ * @v map		DMA mapping
  * @v addr		Buffer address
  * @v len		Length of buffer
- * @v map		DMA mapping
  */
-static void dma_op_free ( struct dma_device *dma, void *addr, size_t len,
-			  struct dma_mapping *map ) {
-	struct dma_operations *op = dma->op;
+static void dma_op_free ( struct dma_mapping *map, void *addr, size_t len ) {
+	struct dma_device *dma = map->dma;
 
-	assert ( op != NULL );
-	op->free ( dma, addr, len, map );
+	assert ( dma != NULL );
+	assert ( dma->op != NULL );
+	dma->op->free ( dma, map, addr, len );
 }
 
 /**
@@ -152,12 +151,13 @@ PROVIDE_DMAAPI_INLINE ( op, dma_phys );
  * Allocate and map I/O buffer for receiving data from device
  *
  * @v dma		DMA device
- * @v len		Length of I/O buffer
  * @v map		DMA mapping to fill in
+ * @v len		Length of I/O buffer
  * @ret iobuf		I/O buffer, or NULL on error
  */
-struct io_buffer * dma_alloc_rx_iob ( struct dma_device *dma, size_t len,
-				      struct dma_mapping *map ) {
+struct io_buffer * dma_alloc_rx_iob ( struct dma_device *dma,
+				      struct dma_mapping *map,
+				      size_t len ) {
 	struct io_buffer *iobuf;
 	int rc;
 
@@ -167,13 +167,13 @@ struct io_buffer * dma_alloc_rx_iob ( struct dma_device *dma, size_t len,
 		goto err_alloc;
 
 	/* Map I/O buffer */
-	if ( ( rc = dma_map ( dma, virt_to_phys ( iobuf->data ), len,
-			      DMA_RX, map ) ) != 0 )
+	if ( ( rc = dma_map ( dma, map, virt_to_phys ( iobuf->data ),
+			      len, DMA_RX ) ) != 0 )
 		goto err_map;
 
 	return iobuf;
 
-	dma_unmap ( dma, map );
+	dma_unmap ( map );
 err_map:
 	free_iob ( iobuf );
 err_alloc:
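A minimal caller-side sketch of the new receive-buffer lifecycle, assuming
a hypothetical helper name example_rx_once (error paths trimmed; the
signatures are taken from the hunks above):

#include <errno.h>
#include <ipxe/dma.h>
#include <ipxe/iobuf.h>

/* Hypothetical: allocate, map, and later release one receive buffer */
static int example_rx_once ( struct dma_device *dma, size_t len ) {
	struct dma_mapping map;
	struct io_buffer *iobuf;

	/* Allocate and map I/O buffer; the mapping records the device */
	iobuf = dma_alloc_rx_iob ( dma, &map, len );
	if ( ! iobuf )
		return -ENOMEM;

	/* ... hand dma ( &map, iobuf->data ) to the hardware ... */

	/* Unmapping now needs only the mapping itself */
	dma_unmap ( &map );
	free_iob ( iobuf );
	return 0;
}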
--- a/src/drivers/net/intel.c
+++ b/src/drivers/net/intel.c
@@ -504,8 +504,8 @@ int intel_create_ring ( struct intel_nic *intel, struct intel_ring *ring ) {
 	 * prevent any possible page-crossing errors due to hardware
 	 * errata.
 	 */
-	ring->desc = dma_alloc ( intel->dma, ring->len, ring->len,
-				 &ring->map );
+	ring->desc = dma_alloc ( intel->dma, &ring->map, ring->len,
+				 ring->len );
 	if ( ! ring->desc )
 		return -ENOMEM;
 
@@ -554,7 +554,7 @@ void intel_destroy_ring ( struct intel_nic *intel, struct intel_ring *ring ) {
 	intel_reset_ring ( intel, ring->reg );
 
 	/* Free descriptor ring */
-	dma_free ( intel->dma, ring->desc, ring->len, &ring->map );
+	dma_free ( &ring->map, ring->desc, ring->len );
 	ring->desc = NULL;
 	ring->prod = 0;
 	ring->cons = 0;
@@ -584,7 +584,7 @@ void intel_refill_rx ( struct intel_nic *intel ) {
 		assert ( intel->rx.iobuf[rx_idx] == NULL );
 
 		/* Allocate I/O buffer */
-		iobuf = dma_alloc_rx_iob ( intel->dma, INTEL_RX_MAX_LEN, map );
+		iobuf = dma_alloc_rx_iob ( intel->dma, map, INTEL_RX_MAX_LEN );
 		if ( ! iobuf ) {
 			/* Wait for next refill */
 			break;
@@ -630,7 +630,7 @@ void intel_flush ( struct intel_nic *intel ) {
 	/* Discard unused receive buffers */
 	for ( i = 0 ; i < INTEL_NUM_RX_DESC ; i++ ) {
 		if ( intel->rx.iobuf[i] ) {
-			dma_unmap ( intel->dma, &intel->rx.map[i] );
+			dma_unmap ( &intel->rx.map[i] );
 			free_iob ( intel->rx.iobuf[i] );
 		}
 		intel->rx.iobuf[i] = NULL;
@@ -639,7 +639,7 @@ void intel_flush ( struct intel_nic *intel ) {
 	/* Unmap incomplete transmit buffers */
 	for ( i = intel->tx.ring.cons ; i != intel->tx.ring.prod ; i++ ) {
 		tx_idx = ( i % INTEL_NUM_TX_DESC );
-		dma_unmap ( intel->dma, &intel->tx.map[tx_idx] );
+		dma_unmap ( &intel->tx.map[tx_idx] );
 	}
 }
 
@@ -773,7 +773,7 @@ int intel_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) {
 	map = &intel->tx.map[tx_idx];
 
 	/* Map I/O buffer */
-	if ( ( rc = dma_map_tx_iob ( intel->dma, iobuf, map ) ) != 0 )
+	if ( ( rc = dma_map_tx_iob ( intel->dma, map, iobuf ) ) != 0 )
 		return rc;
 
 	/* Update producer index */
@@ -822,7 +822,7 @@ void intel_poll_tx ( struct net_device *netdev ) {
 		DBGC2 ( intel, "INTEL %p TX %d complete\n", intel, tx_idx );
 
 		/* Unmap I/O buffer */
-		dma_unmap ( intel->dma, &intel->tx.map[tx_idx] );
+		dma_unmap ( &intel->tx.map[tx_idx] );
 
 		/* Complete TX descriptor */
 		netdev_tx_complete_next ( netdev );
@@ -854,7 +854,7 @@ void intel_poll_rx ( struct net_device *netdev ) {
 			return;
 
 		/* Unmap I/O buffer */
-		dma_unmap ( intel->dma, &intel->rx.map[rx_idx] );
+		dma_unmap ( &intel->rx.map[rx_idx] );
 
 		/* Populate I/O buffer */
 		iobuf = intel->rx.iobuf[rx_idx];
--- a/src/drivers/net/intelxl.c
+++ b/src/drivers/net/intelxl.c
@@ -136,9 +136,9 @@ int intelxl_msix_enable ( struct intelxl_nic *intelxl,
 	int rc;
 
 	/* Map dummy target location */
-	if ( ( rc = dma_map ( intelxl->dma, virt_to_phys ( &intelxl->msix.msg ),
-			      sizeof ( intelxl->msix.msg ), DMA_RX,
-			      &intelxl->msix.map ) ) != 0 ) {
+	if ( ( rc = dma_map ( intelxl->dma, &intelxl->msix.map,
+			      virt_to_phys ( &intelxl->msix.msg ),
+			      sizeof ( intelxl->msix.msg ), DMA_RX ) ) != 0 ) {
 		DBGC ( intelxl, "INTELXL %p could not map MSI-X target: %s\n",
 		       intelxl, strerror ( rc ) );
 		goto err_map;
@@ -162,7 +162,7 @@ int intelxl_msix_enable ( struct intelxl_nic *intelxl,
 
 	pci_msix_disable ( pci, &intelxl->msix.cap );
 err_enable:
-	dma_unmap ( intelxl->dma, &intelxl->msix.map );
+	dma_unmap ( &intelxl->msix.map );
 err_map:
 	return rc;
 }
@@ -183,7 +183,7 @@ void intelxl_msix_disable ( struct intelxl_nic *intelxl,
 	pci_msix_disable ( pci, &intelxl->msix.cap );
 
 	/* Unmap dummy target location */
-	dma_unmap ( intelxl->dma, &intelxl->msix.map );
+	dma_unmap ( &intelxl->msix.map );
 }
 
 /******************************************************************************
@@ -215,8 +215,8 @@ static int intelxl_alloc_admin ( struct intelxl_nic *intelxl,
 	size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC );
 
 	/* Allocate admin queue */
-	admin->buf = dma_alloc ( intelxl->dma, ( buf_len + len ),
-				 INTELXL_ALIGN, &admin->map );
+	admin->buf = dma_alloc ( intelxl->dma, &admin->map, ( buf_len + len ),
+				 INTELXL_ALIGN );
 	if ( ! admin->buf )
 		return -ENOMEM;
 	admin->desc = ( ( ( void * ) admin->buf ) + buf_len );
@@ -291,13 +291,13 @@ static void intelxl_disable_admin ( struct intelxl_nic *intelxl,
  * @v intelxl		Intel device
  * @v admin		Admin queue
  */
-static void intelxl_free_admin ( struct intelxl_nic *intelxl,
+static void intelxl_free_admin ( struct intelxl_nic *intelxl __unused,
 				 struct intelxl_admin *admin ) {
 	size_t buf_len = ( sizeof ( admin->buf[0] ) * INTELXL_ADMIN_NUM_DESC );
 	size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC );
 
 	/* Free queue */
-	dma_free ( intelxl->dma, admin->buf, ( buf_len + len ), &admin->map );
+	dma_free ( &admin->map, admin->buf, ( buf_len + len ) );
 }
 
 /**
@@ -945,8 +945,8 @@ int intelxl_alloc_ring ( struct intelxl_nic *intelxl,
 	int rc;
 
 	/* Allocate descriptor ring */
-	ring->desc.raw = dma_alloc ( intelxl->dma, ring->len, INTELXL_ALIGN,
-				     &ring->map );
+	ring->desc.raw = dma_alloc ( intelxl->dma, &ring->map, ring->len,
+				     INTELXL_ALIGN );
 	if ( ! ring->desc.raw ) {
 		rc = -ENOMEM;
 		goto err_alloc;
@@ -969,7 +969,7 @@ int intelxl_alloc_ring ( struct intelxl_nic *intelxl,
 
 	return 0;
 
-	dma_free ( intelxl->dma, ring->desc.raw, ring->len, &ring->map );
+	dma_free ( &ring->map, ring->desc.raw, ring->len );
 err_alloc:
 	return rc;
 }
@@ -980,11 +980,11 @@ int intelxl_alloc_ring ( struct intelxl_nic *intelxl,
  * @v intelxl		Intel device
  * @v ring		Descriptor ring
  */
-void intelxl_free_ring ( struct intelxl_nic *intelxl,
+void intelxl_free_ring ( struct intelxl_nic *intelxl __unused,
 			 struct intelxl_ring *ring ) {
 
 	/* Free descriptor ring */
-	dma_free ( intelxl->dma, ring->desc.raw, ring->len, &ring->map );
+	dma_free ( &ring->map, ring->desc.raw, ring->len );
 	ring->desc.raw = NULL;
 }
 
@@ -1322,7 +1322,7 @@ static void intelxl_refill_rx ( struct intelxl_nic *intelxl ) {
 		assert ( intelxl->rx.iobuf[rx_idx] == NULL );
 
 		/* Allocate I/O buffer */
-		iobuf = dma_alloc_rx_iob ( intelxl->dma, intelxl->mfs, map );
+		iobuf = dma_alloc_rx_iob ( intelxl->dma, map, intelxl->mfs );
 		if ( ! iobuf ) {
 			/* Wait for next refill */
 			break;
@@ -1365,7 +1365,7 @@ void intelxl_flush ( struct intelxl_nic *intelxl ) {
 	/* Discard any unused receive buffers */
 	for ( i = 0 ; i < INTELXL_RX_NUM_DESC ; i++ ) {
 		if ( intelxl->rx.iobuf[i] ) {
-			dma_unmap ( intelxl->dma, &intelxl->rx.map[i] );
+			dma_unmap ( &intelxl->rx.map[i] );
 			free_iob ( intelxl->rx.iobuf[i] );
 		}
 		intelxl->rx.iobuf[i] = NULL;
@@ -1374,7 +1374,7 @@ void intelxl_flush ( struct intelxl_nic *intelxl ) {
 	/* Unmap incomplete transmit buffers */
 	for ( i = intelxl->tx.ring.cons ; i != intelxl->tx.ring.prod ; i++ ) {
 		tx_idx = ( i % INTELXL_TX_NUM_DESC );
-		dma_unmap ( intelxl->dma, &intelxl->tx.map[tx_idx] );
+		dma_unmap ( &intelxl->tx.map[tx_idx] );
 	}
 }
 
@@ -1516,7 +1516,7 @@ int intelxl_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) {
 	map = &intelxl->tx.map[tx_idx];
 
 	/* Map I/O buffer */
-	if ( ( rc = dma_map_tx_iob ( intelxl->dma, iobuf, map ) ) != 0 )
+	if ( ( rc = dma_map_tx_iob ( intelxl->dma, map, iobuf ) ) != 0 )
 		return rc;
 
 	/* Update producer index */
@@ -1564,7 +1564,7 @@ static void intelxl_poll_tx ( struct net_device *netdev ) {
 			intelxl, tx_idx );
 
 		/* Unmap I/O buffer */
-		dma_unmap ( intelxl->dma, &intelxl->tx.map[tx_idx] );
+		dma_unmap ( &intelxl->tx.map[tx_idx] );
 
 		/* Complete TX descriptor */
 		netdev_tx_complete_next ( netdev );
@@ -1597,7 +1597,7 @@ static void intelxl_poll_rx ( struct net_device *netdev ) {
 			return;
 
 		/* Unmap I/O buffer */
-		dma_unmap ( intelxl->dma, &intelxl->rx.map[rx_idx] );
+		dma_unmap ( &intelxl->rx.map[rx_idx] );
 
 		/* Populate I/O buffer */
 		iobuf = intelxl->rx.iobuf[rx_idx];
--- a/src/drivers/net/realtek.c
+++ b/src/drivers/net/realtek.c
@@ -514,7 +514,8 @@ static int realtek_create_buffer ( struct realtek_nic *rtl ) {
 		return 0;
 
 	/* Allocate buffer */
-	rxbuf->data = dma_alloc ( rtl->dma, len, RTL_RXBUF_ALIGN, &rxbuf->map );
+	rxbuf->data = dma_alloc ( rtl->dma, &rxbuf->map, len,
+				  RTL_RXBUF_ALIGN );
 	if ( ! rxbuf->data )
 		return -ENOMEM;
 
@@ -545,7 +546,7 @@ static void realtek_destroy_buffer ( struct realtek_nic *rtl ) {
 	writel ( 0, rtl->regs + RTL_RBSTART );
 
 	/* Free buffer */
-	dma_free ( rtl->dma, rxbuf->data, len, &rxbuf->map );
+	dma_free ( &rxbuf->map, rxbuf->data, len );
 	rxbuf->data = NULL;
 	rxbuf->offset = 0;
 }
@@ -566,8 +567,8 @@ static int realtek_create_ring ( struct realtek_nic *rtl,
 		return 0;
 
 	/* Allocate descriptor ring */
-	ring->desc = dma_alloc ( rtl->dma, ring->len, RTL_RING_ALIGN,
-				 &ring->map );
+	ring->desc = dma_alloc ( rtl->dma, &ring->map, ring->len,
+				 RTL_RING_ALIGN );
 	if ( ! ring->desc )
 		return -ENOMEM;
 
@@ -608,7 +609,7 @@ static void realtek_destroy_ring ( struct realtek_nic *rtl,
 	writel ( 0, rtl->regs + ring->reg + 4 );
 
 	/* Free descriptor ring */
-	dma_free ( rtl->dma, ring->desc, ring->len, &ring->map );
+	dma_free ( &ring->map, ring->desc, ring->len );
 	ring->desc = NULL;
 }
 
@@ -638,7 +639,7 @@ static void realtek_refill_rx ( struct realtek_nic *rtl ) {
 		assert ( rtl->rx.iobuf[rx_idx] == NULL );
 
 		/* Allocate I/O buffer */
-		iobuf = dma_alloc_rx_iob ( rtl->dma, RTL_RX_MAX_LEN, map );
+		iobuf = dma_alloc_rx_iob ( rtl->dma, map, RTL_RX_MAX_LEN );
 		if ( ! iobuf ) {
 			/* Wait for next refill */
 			return;
@@ -748,7 +749,7 @@ static void realtek_close ( struct net_device *netdev ) {
 	/* Discard any unused receive buffers */
 	for ( i = 0 ; i < RTL_NUM_RX_DESC ; i++ ) {
 		if ( rtl->rx.iobuf[i] ) {
-			dma_unmap ( rtl->dma, &rtl->rx.map[i] );
+			dma_unmap ( &rtl->rx.map[i] );
 			free_iob ( rtl->rx.iobuf[i] );
 		}
 		rtl->rx.iobuf[i] = NULL;
@@ -756,7 +757,7 @@ static void realtek_close ( struct net_device *netdev ) {
 
 	/* Unmap any incomplete transmit buffers */
 	for ( i = rtl->tx.ring.cons ; i != rtl->tx.ring.prod ; i++ )
-		dma_unmap ( rtl->dma, &rtl->tx.map[ i % RTL_NUM_TX_DESC ] );
+		dma_unmap ( &rtl->tx.map[ i % RTL_NUM_TX_DESC ] );
 
 	/* Destroy transmit descriptor ring */
 	realtek_destroy_ring ( rtl, &rtl->tx.ring );
@@ -796,7 +797,7 @@ static int realtek_transmit ( struct net_device *netdev,
 		iob_pad ( iobuf, ETH_ZLEN );
 
 	/* Map I/O buffer */
-	if ( ( rc = dma_map_tx_iob ( rtl->dma, iobuf, map ) ) != 0 )
+	if ( ( rc = dma_map_tx_iob ( rtl->dma, map, iobuf ) ) != 0 )
 		return rc;
 	address = dma ( map, iobuf->data );
 
@@ -870,7 +871,7 @@ static void realtek_poll_tx ( struct net_device *netdev ) {
 		DBGC2 ( rtl, "REALTEK %p TX %d complete\n", rtl, tx_idx );
 
 		/* Unmap I/O buffer */
-		dma_unmap ( rtl->dma, &rtl->tx.map[tx_idx] );
+		dma_unmap ( &rtl->tx.map[tx_idx] );
 
 		/* Complete TX descriptor */
 		rtl->tx.ring.cons++;
@@ -964,7 +965,7 @@ static void realtek_poll_rx ( struct net_device *netdev ) {
 			return;
 
 		/* Unmap buffer */
-		dma_unmap ( rtl->dma, &rtl->rx.map[rx_idx] );
+		dma_unmap ( &rtl->rx.map[rx_idx] );
 
 		/* Populate I/O buffer */
 		iobuf = rtl->rx.iobuf[rx_idx];
--- a/src/include/ipxe/dma.h
+++ b/src/include/ipxe/dma.h
@@ -37,6 +37,8 @@ struct dma_mapping {
 	 * device-side DMA address.
 	 */
 	physaddr_t offset;
+	/** DMA device (if unmapping is required) */
+	struct dma_device *dma;
 	/** Platform mapping token */
 	void *token;
 };
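After this hunk, struct dma_mapping carries everything needed to undo the
mapping on its own. Assembled from the surrounding header (the offset
field's comment is abridged here):

struct dma_mapping {
	/* Offset added to a host physical address within the mapping
	 * to produce the device-side DMA address (comment abridged) */
	physaddr_t offset;
	/** DMA device (if unmapping is required) */
	struct dma_device *dma;
	/** Platform mapping token */
	void *token;
};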
@@ -59,14 +61,14 @@ struct dma_operations {
 	 * Map buffer for DMA
 	 *
 	 * @v dma		DMA device
+	 * @v map		DMA mapping to fill in
 	 * @v addr		Buffer address
 	 * @v len		Length of buffer
 	 * @v flags		Mapping flags
-	 * @v map		DMA mapping to fill in
 	 * @ret rc		Return status code
 	 */
-	int ( * map ) ( struct dma_device *dma, physaddr_t addr, size_t len,
-			int flags, struct dma_mapping *map );
+	int ( * map ) ( struct dma_device *dma, struct dma_mapping *map,
+			physaddr_t addr, size_t len, int flags );
 	/**
 	 * Unmap buffer
 	 *
@@ -78,23 +80,23 @@ struct dma_operations {
 	 * Allocate and map DMA-coherent buffer
 	 *
 	 * @v dma		DMA device
+	 * @v map		DMA mapping to fill in
 	 * @v len		Length of buffer
 	 * @v align		Physical alignment
-	 * @v map		DMA mapping to fill in
 	 * @ret addr		Buffer address, or NULL on error
 	 */
-	void * ( * alloc ) ( struct dma_device *dma, size_t len, size_t align,
-			     struct dma_mapping *map );
+	void * ( * alloc ) ( struct dma_device *dma, struct dma_mapping *map,
+			     size_t len, size_t align );
 	/**
 	 * Unmap and free DMA-coherent buffer
 	 *
 	 * @v dma		DMA device
+	 * @v map		DMA mapping
 	 * @v addr		Buffer address
 	 * @v len		Length of buffer
-	 * @v map		DMA mapping
 	 */
-	void ( * free ) ( struct dma_device *dma, void *addr, size_t len,
-			  struct dma_mapping *map );
+	void ( * free ) ( struct dma_device *dma, struct dma_mapping *map,
+			  void *addr, size_t len );
 	/**
 	 * Set addressable space mask
 	 *
@@ -146,21 +148,23 @@ struct dma_operations {
 * Map buffer for DMA
 *
 * @v dma		DMA device
+ * @v map		DMA mapping to fill in
 * @v addr		Buffer address
 * @v len		Length of buffer
 * @v flags		Mapping flags
- * @v map		DMA mapping to fill in
 * @ret rc		Return status code
 */
 static inline __always_inline int
 DMAAPI_INLINE ( flat, dma_map ) ( struct dma_device *dma,
+				  struct dma_mapping *map,
 				  physaddr_t addr __unused,
-				  size_t len __unused, int flags __unused,
-				  struct dma_mapping *map __unused ) {
+				  size_t len __unused, int flags __unused ) {
 
 	/* Increment mapping count (for debugging) */
-	if ( DBG_LOG )
+	if ( DBG_LOG ) {
+		map->dma = dma;
 		dma->mapped++;
+	}
 
 	return 0;
 }
@@ -168,39 +172,42 @@ DMAAPI_INLINE ( flat, dma_map ) ( struct dma_device *dma,
 /**
 * Unmap buffer
 *
- * @v dma		DMA device
 * @v map		DMA mapping
 */
 static inline __always_inline void
-DMAAPI_INLINE ( flat, dma_unmap ) ( struct dma_device *dma,
-				    struct dma_mapping *map __unused ) {
+DMAAPI_INLINE ( flat, dma_unmap ) ( struct dma_mapping *map ) {
 
 	/* Decrement mapping count (for debugging) */
-	if ( DBG_LOG )
-		dma->mapped--;
+	if ( DBG_LOG ) {
+		assert ( map->dma != NULL );
+		map->dma->mapped--;
+		map->dma = NULL;
+	}
 }
 
 /**
 * Allocate and map DMA-coherent buffer
 *
 * @v dma		DMA device
+ * @v map		DMA mapping to fill in
 * @v len		Length of buffer
 * @v align		Physical alignment
- * @v map		DMA mapping to fill in
 * @ret addr		Buffer address, or NULL on error
 */
 static inline __always_inline void *
 DMAAPI_INLINE ( flat, dma_alloc ) ( struct dma_device *dma,
-				    size_t len, size_t align,
-				    struct dma_mapping *map __unused ) {
+				    struct dma_mapping *map,
+				    size_t len, size_t align ) {
 	void *addr;
 
 	/* Allocate buffer */
 	addr = malloc_phys ( len, align );
 
-	/* Increment allocation count (for debugging) */
-	if ( DBG_LOG && addr )
-		dma->allocated++;
+	/* Increment mapping count (for debugging) */
+	if ( DBG_LOG && addr ) {
+		map->dma = dma;
+		dma->mapped++;
+	}
 
 	return addr;
 }
@@ -208,22 +215,23 @@ DMAAPI_INLINE ( flat, dma_alloc ) ( struct dma_device *dma,
 /**
 * Unmap and free DMA-coherent buffer
 *
- * @v dma		DMA device
+ * @v map		DMA mapping
 * @v addr		Buffer address
 * @v len		Length of buffer
- * @v map		DMA mapping
 */
 static inline __always_inline void
-DMAAPI_INLINE ( flat, dma_free ) ( struct dma_device *dma,
-				   void *addr, size_t len,
-				   struct dma_mapping *map __unused ) {
+DMAAPI_INLINE ( flat, dma_free ) ( struct dma_mapping *map,
+				   void *addr, size_t len ) {
 
 	/* Free buffer */
 	free_phys ( addr, len );
 
-	/* Decrement allocation count (for debugging) */
-	if ( DBG_LOG )
-		dma->allocated--;
+	/* Decrement mapping count (for debugging) */
+	if ( DBG_LOG ) {
+		assert ( map->dma != NULL );
+		map->dma->mapped--;
+		map->dma = NULL;
+	}
 }
 
 /**
@@ -272,45 +280,42 @@ DMAAPI_INLINE ( op, dma_phys ) ( struct dma_mapping *map, physaddr_t addr ) {
 * Map buffer for DMA
 *
 * @v dma		DMA device
+ * @v map		DMA mapping to fill in
 * @v addr		Buffer address
 * @v len		Length of buffer
 * @v flags		Mapping flags
- * @v map		DMA mapping to fill in
 * @ret rc		Return status code
 */
-int dma_map ( struct dma_device *dma, physaddr_t addr, size_t len,
-	      int flags, struct dma_mapping *map );
+int dma_map ( struct dma_device *dma, struct dma_mapping *map,
+	      physaddr_t addr, size_t len, int flags );
 
 /**
 * Unmap buffer
 *
- * @v dma		DMA device
 * @v map		DMA mapping
 */
-void dma_unmap ( struct dma_device *dma, struct dma_mapping *map );
+void dma_unmap ( struct dma_mapping *map );
 
 /**
 * Allocate and map DMA-coherent buffer
 *
 * @v dma		DMA device
+ * @v map		DMA mapping to fill in
 * @v len		Length of buffer
 * @v align		Physical alignment
- * @v map		DMA mapping to fill in
 * @ret addr		Buffer address, or NULL on error
 */
-void * dma_alloc ( struct dma_device *dma, size_t len, size_t align,
-		   struct dma_mapping *map );
+void * dma_alloc ( struct dma_device *dma, struct dma_mapping *map,
		   size_t len, size_t align );
 
 /**
 * Unmap and free DMA-coherent buffer
 *
- * @v dma		DMA device
+ * @v map		DMA mapping
 * @v addr		Buffer address
 * @v len		Length of buffer
- * @v map		DMA mapping
 */
-void dma_free ( struct dma_device *dma, void *addr, size_t len,
-		struct dma_mapping *map );
+void dma_free ( struct dma_mapping *map, void *addr, size_t len );
 
 /**
 * Set addressable space mask
 *
@@ -339,9 +344,22 @@ physaddr_t dma_phys ( struct dma_mapping *map, physaddr_t addr );
 static inline __always_inline physaddr_t dma ( struct dma_mapping *map,
 					       void *addr ) {
 
+	/* Get DMA address from corresponding physical address */
 	return dma_phys ( map, virt_to_phys ( addr ) );
 }
 
+/**
+ * Check if DMA unmapping is required
+ *
+ * @v map		DMA mapping
+ * @v unmap		Unmapping is required
+ */
+static inline __always_inline int dma_mapped ( struct dma_mapping *map ) {
+
+	/* Unmapping is required if a DMA device was recorded */
+	return ( map->dma != NULL );
+}
+
 /**
 * Initialise DMA device
 *
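The new dma_mapped() helper is what lets code other than the owning device
driver decide whether dma_unmap() is still needed, which is the point of
this commit. A sketch of the resulting guard pattern (hypothetical helper
name example_teardown; everything else is from the header above):

#include <ipxe/dma.h>

/* Hypothetical: tear down a mapping that may or may not be in use */
static void example_teardown ( struct dma_mapping *map ) {

	/* Unmap only if a DMA device was recorded in the mapping */
	if ( dma_mapped ( map ) )
		dma_unmap ( map );
}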
@@ -371,20 +389,21 @@ dma_set_mask_64bit ( struct dma_device *dma ) {
 * Map I/O buffer for transmitting data to device
 *
 * @v dma		DMA device
- * @v iobuf		I/O buffer
 * @v map		DMA mapping to fill in
+ * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
 static inline __always_inline int
-dma_map_tx_iob ( struct dma_device *dma, struct io_buffer *iobuf,
-		 struct dma_mapping *map ) {
+dma_map_tx_iob ( struct dma_device *dma, struct dma_mapping *map,
+		 struct io_buffer *iobuf ) {
 
 	/* Map I/O buffer */
-	return dma_map ( dma, virt_to_phys ( iobuf->data ), iob_len ( iobuf ),
-			 DMA_TX, map );
+	return dma_map ( dma, map, virt_to_phys ( iobuf->data ),
+			 iob_len ( iobuf ), DMA_TX );
 }
 
-extern struct io_buffer * dma_alloc_rx_iob ( struct dma_device *dma, size_t len,
-					     struct dma_mapping *map );
+extern struct io_buffer * dma_alloc_rx_iob ( struct dma_device *dma,
+					     struct dma_mapping *map,
+					     size_t len );
 
 #endif /* _IPXE_DMA_H */
--- a/src/interface/efi/efi_pci.c
+++ b/src/interface/efi/efi_pci.c
@@ -315,14 +315,14 @@ PROVIDE_PCIAPI ( efi, pci_ioremap, efipci_ioremap );
 * Map buffer for DMA
 *
 * @v dma		DMA device
+ * @v map		DMA mapping to fill in
 * @v addr		Buffer address
 * @v len		Length of buffer
 * @v flags		Mapping flags
- * @v map		DMA mapping to fill in
 * @ret rc		Return status code
 */
-static int efipci_dma_map ( struct dma_device *dma, physaddr_t addr, size_t len,
-			    int flags, struct dma_mapping *map ) {
+static int efipci_dma_map ( struct dma_device *dma, struct dma_mapping *map,
+			    physaddr_t addr, size_t len, int flags ) {
 	struct efi_pci_device *efipci =
 		container_of ( dma, struct efi_pci_device, pci.dma );
 	struct pci_device *pci = &efipci->pci;
@@ -374,6 +374,7 @@ static int efipci_dma_map ( struct dma_device *dma, physaddr_t addr, size_t len,
 	}
 
 	/* Populate mapping */
+	map->dma = dma;
 	map->offset = ( bus - addr );
 	map->token = mapping;
 
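Every op-backed implementation must now record the owning device in the
mapping, exactly as efipci_dma_map does above, because dma_unmap() and
dma_free() recover dma->op from map->dma. A minimal sketch of what any
other implementation's map method would add (hypothetical function name;
an identity mapping is assumed, so offset and token stay trivial):

static int example_dma_map ( struct dma_device *dma, struct dma_mapping *map,
			     physaddr_t addr __unused, size_t len __unused,
			     int flags __unused ) {

	/* A real implementation would create the platform mapping here */

	/* Record the device so that dma_unmap ( map ) can find dma->op */
	map->dma = dma;
	map->offset = 0;
	map->token = NULL;

	return 0;
}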
@@ -420,14 +421,14 @@ static void efipci_dma_unmap ( struct dma_device *dma,
 * Allocate and map DMA-coherent buffer
 *
 * @v dma		DMA device
+ * @v map		DMA mapping to fill in
 * @v len		Length of buffer
 * @v align		Physical alignment
- * @v map		DMA mapping to fill in
 * @ret addr		Buffer address, or NULL on error
 */
-static void * efipci_dma_alloc ( struct dma_device *dma, size_t len,
-				 size_t align __unused,
-				 struct dma_mapping *map ) {
+static void * efipci_dma_alloc ( struct dma_device *dma,
+				 struct dma_mapping *map,
+				 size_t len, size_t align __unused ) {
 	struct efi_pci_device *efipci =
 		container_of ( dma, struct efi_pci_device, pci.dma );
 	struct pci_device *pci = &efipci->pci;
@@ -451,8 +452,8 @@ static void * efipci_dma_alloc ( struct dma_device *dma, size_t len,
 	}
 
 	/* Map buffer */
-	if ( ( rc = efipci_dma_map ( dma, virt_to_phys ( addr ), len, DMA_BI,
-				     map ) ) != 0 )
+	if ( ( rc = efipci_dma_map ( dma, map, virt_to_phys ( addr ),
+				     len, DMA_BI ) ) != 0 )
 		goto err_map;
 
 	/* Increment allocation count (for debugging) */
@@ -472,12 +473,12 @@ static void * efipci_dma_alloc ( struct dma_device *dma, size_t len,
 * Unmap and free DMA-coherent buffer
 *
 * @v dma		DMA device
+ * @v map		DMA mapping
 * @v addr		Buffer address
 * @v len		Length of buffer
- * @v map		DMA mapping
 */
-static void efipci_dma_free ( struct dma_device *dma, void *addr, size_t len,
-			      struct dma_mapping *map ) {
+static void efipci_dma_free ( struct dma_device *dma, struct dma_mapping *map,
+			      void *addr, size_t len ) {
 	struct efi_pci_device *efipci =
 		container_of ( dma, struct efi_pci_device, pci.dma );
 	EFI_PCI_IO_PROTOCOL *pci_io = efipci->io;