[xhci] Update driver to use DMA API

Signed-off-by: Michael Brown <mcb30@ipxe.org>
Michael Brown 2020-11-25 11:24:41 +00:00
parent 6e01b74a8a
commit 13a6d17296
3 changed files with 193 additions and 95 deletions
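The conversion follows a single pattern throughout the three files below: every buffer that the controller itself reads or writes (DCBAA, scratchpad array, rings, contexts) is allocated through the DMA API against the device's struct dma_device, and the address programmed into hardware registers or TRBs is taken from the resulting mapping rather than from virt_to_phys(). A condensed sketch of that pattern, using only the calls visible in this diff (the buffer and length names are illustrative, not the driver's own):

	/* Coherent allocation: the dma_mapping travels with the buffer */
	struct dma_mapping map;
	void *buf;

	buf = dma_alloc ( xhci->dma, &map, len, xhci_align ( len ) );
	if ( ! buf )
		return -ENOMEM;
	memset ( buf, 0, len );

	/* The device is given the DMA address from the mapping; debug
	 * messages continue to print virt_to_phys() for the CPU's view.
	 */
	xhci_writeq ( xhci, dma ( &map, buf ), xhci->op + XHCI_OP_DCBAAP );

	/* ... */

	dma_free ( &map, buf, len );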

src/drivers/usb/xhci.c

@@ -31,7 +31,6 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
 #include <errno.h>
 #include <byteswap.h>
 #include <ipxe/malloc.h>
-#include <ipxe/umalloc.h>
 #include <ipxe/pci.h>
 #include <ipxe/usb.h>
 #include <ipxe/init.h>
@@ -294,9 +293,9 @@ static void xhci_init ( struct xhci_device *xhci, void *regs ) {
 	/* Read structural parameters 2 */
 	hcsparams2 = readl ( xhci->cap + XHCI_CAP_HCSPARAMS2 );
-	xhci->scratchpads = XHCI_HCSPARAMS2_SCRATCHPADS ( hcsparams2 );
+	xhci->scratch.count = XHCI_HCSPARAMS2_SCRATCHPADS ( hcsparams2 );
 	DBGC2 ( xhci, "XHCI %s needs %d scratchpads\n",
-		xhci->name, xhci->scratchpads );
+		xhci->name, xhci->scratch.count );
 
 	/* Read capability parameters 1 */
 	hccparams1 = readl ( xhci->cap + XHCI_CAP_HCCPARAMS1 );
@@ -918,27 +917,29 @@ static int xhci_dcbaa_alloc ( struct xhci_device *xhci ) {
 	 * align on its own size (rounded up to a power of two and
 	 * with a minimum of 64 bytes).
 	 */
-	len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa[0] ) );
-	xhci->dcbaa = malloc_phys ( len, xhci_align ( len ) );
-	if ( ! xhci->dcbaa ) {
+	len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa.context[0] ) );
+	xhci->dcbaa.context = dma_alloc ( xhci->dma, &xhci->dcbaa.map, len,
+					  xhci_align ( len ) );
+	if ( ! xhci->dcbaa.context ) {
 		DBGC ( xhci, "XHCI %s could not allocate DCBAA\n", xhci->name );
 		rc = -ENOMEM;
 		goto err_alloc;
 	}
-	memset ( xhci->dcbaa, 0, len );
+	memset ( xhci->dcbaa.context, 0, len );
 
 	/* Program DCBAA pointer */
-	dcbaap = virt_to_phys ( xhci->dcbaa );
+	dcbaap = dma ( &xhci->dcbaa.map, xhci->dcbaa.context );
 	if ( ( rc = xhci_writeq ( xhci, dcbaap,
 				  xhci->op + XHCI_OP_DCBAAP ) ) != 0 )
 		goto err_writeq;
-	DBGC2 ( xhci, "XHCI %s DCBAA at [%08lx,%08lx)\n",
-		xhci->name, dcbaap, ( dcbaap + len ) );
+	DBGC2 ( xhci, "XHCI %s DCBAA at [%08lx,%08lx)\n", xhci->name,
+		virt_to_phys ( xhci->dcbaa.context ),
+		( virt_to_phys ( xhci->dcbaa.context ) + len ) );
 
 	return 0;
 
  err_writeq:
-	free_phys ( xhci->dcbaa, len );
+	dma_free ( &xhci->dcbaa.map, xhci->dcbaa.context, len );
  err_alloc:
 	return rc;
 }
@@ -954,14 +955,14 @@ static void xhci_dcbaa_free ( struct xhci_device *xhci ) {
 
 	/* Sanity check */
 	for ( i = 0 ; i <= xhci->slots ; i++ )
-		assert ( xhci->dcbaa[i] == 0 );
+		assert ( xhci->dcbaa.context[i] == 0 );
 
 	/* Clear DCBAA pointer */
 	xhci_writeq ( xhci, 0, xhci->op + XHCI_OP_DCBAAP );
 
 	/* Free DCBAA */
-	len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa[0] ) );
-	free_phys ( xhci->dcbaa, len );
+	len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa.context[0] ) );
+	dma_free ( &xhci->dcbaa.map, xhci->dcbaa.context, len );
 }
 
 /******************************************************************************
@@ -978,32 +979,34 @@ static void xhci_dcbaa_free ( struct xhci_device *xhci ) {
  * @ret rc		Return status code
  */
 static int xhci_scratchpad_alloc ( struct xhci_device *xhci ) {
+	struct xhci_scratchpad *scratch = &xhci->scratch;
+	size_t buffer_len;
 	size_t array_len;
-	size_t len;
-	physaddr_t phys;
+	physaddr_t addr;
 	unsigned int i;
 	int rc;
 
 	/* Do nothing if no scratchpad buffers are used */
-	if ( ! xhci->scratchpads )
+	if ( ! scratch->count )
 		return 0;
 
-	/* Allocate scratchpads */
-	len = ( xhci->scratchpads * xhci->pagesize );
-	xhci->scratchpad = umalloc ( len );
-	if ( ! xhci->scratchpad ) {
+	/* Allocate scratchpad buffers */
+	buffer_len = ( scratch->count * xhci->pagesize );
+	scratch->buffer = dma_umalloc ( xhci->dma, &scratch->buffer_map,
+					buffer_len, xhci->pagesize );
+	if ( ! scratch->buffer ) {
 		DBGC ( xhci, "XHCI %s could not allocate scratchpad buffers\n",
 		       xhci->name );
 		rc = -ENOMEM;
 		goto err_alloc;
 	}
-	memset_user ( xhci->scratchpad, 0, 0, len );
+	memset_user ( scratch->buffer, 0, 0, buffer_len );
 
 	/* Allocate scratchpad array */
-	array_len = ( xhci->scratchpads * sizeof ( xhci->scratchpad_array[0] ));
-	xhci->scratchpad_array =
-		malloc_phys ( array_len, xhci_align ( array_len ) );
-	if ( ! xhci->scratchpad_array ) {
+	array_len = ( scratch->count * sizeof ( scratch->array[0] ) );
+	scratch->array = dma_alloc ( xhci->dma, &scratch->array_map,
+				     array_len, xhci_align ( array_len ) );
+	if ( ! scratch->array ) {
 		DBGC ( xhci, "XHCI %s could not allocate scratchpad buffer "
 		       "array\n", xhci->name );
 		rc = -ENOMEM;
@@ -1011,25 +1014,28 @@ static int xhci_scratchpad_alloc ( struct xhci_device *xhci ) {
 	}
 
 	/* Populate scratchpad array */
-	for ( i = 0 ; i < xhci->scratchpads ; i++ ) {
-		phys = user_to_phys ( xhci->scratchpad, ( i * xhci->pagesize ));
-		xhci->scratchpad_array[i] = phys;
+	addr = dma_phys ( &scratch->buffer_map,
+			  user_to_phys ( scratch->buffer, 0 ) );
+	for ( i = 0 ; i < scratch->count ; i++ ) {
+		scratch->array[i] = cpu_to_le64 ( addr );
+		addr += xhci->pagesize;
 	}
 
 	/* Set scratchpad array pointer */
-	assert ( xhci->dcbaa != NULL );
-	xhci->dcbaa[0] = cpu_to_le64 ( virt_to_phys ( xhci->scratchpad_array ));
+	assert ( xhci->dcbaa.context != NULL );
+	xhci->dcbaa.context[0] = cpu_to_le64 ( dma ( &scratch->array_map,
+						     scratch->array ) );
 
 	DBGC2 ( xhci, "XHCI %s scratchpad [%08lx,%08lx) array [%08lx,%08lx)\n",
-		xhci->name, user_to_phys ( xhci->scratchpad, 0 ),
-		user_to_phys ( xhci->scratchpad, len ),
-		virt_to_phys ( xhci->scratchpad_array ),
-		( virt_to_phys ( xhci->scratchpad_array ) + array_len ) );
+		xhci->name, user_to_phys ( scratch->buffer, 0 ),
+		user_to_phys ( scratch->buffer, buffer_len ),
+		virt_to_phys ( scratch->array ),
+		( virt_to_phys ( scratch->array ) + array_len ) );
 	return 0;
 
-	free_phys ( xhci->scratchpad_array, array_len );
+	dma_free ( &scratch->array_map, scratch->array, array_len );
  err_alloc_array:
-	ufree ( xhci->scratchpad );
+	dma_ufree ( &scratch->buffer_map, scratch->buffer, buffer_len );
  err_alloc:
 	return rc;
 }
@@ -1040,22 +1046,25 @@ static int xhci_scratchpad_alloc ( struct xhci_device *xhci ) {
  * @v xhci		xHCI device
  */
 static void xhci_scratchpad_free ( struct xhci_device *xhci ) {
+	struct xhci_scratchpad *scratch = &xhci->scratch;
 	size_t array_len;
+	size_t buffer_len;
 
 	/* Do nothing if no scratchpad buffers are used */
-	if ( ! xhci->scratchpads )
+	if ( ! scratch->count )
 		return;
 
 	/* Clear scratchpad array pointer */
-	assert ( xhci->dcbaa != NULL );
-	xhci->dcbaa[0] = 0;
+	assert ( xhci->dcbaa.context != NULL );
+	xhci->dcbaa.context[0] = 0;
 
 	/* Free scratchpad array */
-	array_len = ( xhci->scratchpads * sizeof ( xhci->scratchpad_array[0] ));
-	free_phys ( xhci->scratchpad_array, array_len );
+	array_len = ( scratch->count * sizeof ( scratch->array[0] ) );
+	dma_free ( &scratch->array_map, scratch->array, array_len );
 
-	/* Free scratchpads */
-	ufree ( xhci->scratchpad );
+	/* Free scratchpad buffers */
+	buffer_len = ( scratch->count * xhci->pagesize );
+	dma_ufree ( &scratch->buffer_map, scratch->buffer, buffer_len );
 }
 
 /******************************************************************************
@@ -1202,7 +1211,8 @@ static int xhci_ring_alloc ( struct xhci_device *xhci,
 	}
 
 	/* Allocate TRBs */
-	ring->trb = malloc_phys ( ring->len, xhci_align ( ring->len ) );
+	ring->trb = dma_alloc ( xhci->dma, &ring->map, ring->len,
+				xhci_align ( ring->len ) );
 	if ( ! ring->trb ) {
 		rc = -ENOMEM;
 		goto err_alloc_trb;
@@ -1211,14 +1221,14 @@ static int xhci_ring_alloc ( struct xhci_device *xhci,
 
 	/* Initialise Link TRB */
 	link = &ring->trb[count].link;
-	link->next = cpu_to_le64 ( virt_to_phys ( ring->trb ) );
+	link->next = cpu_to_le64 ( dma ( &ring->map, ring->trb ) );
 	link->flags = XHCI_TRB_TC;
 	link->type = XHCI_TRB_LINK;
 	ring->link = link;
 
 	return 0;
 
-	free_phys ( ring->trb, ring->len );
+	dma_free ( &ring->map, ring->trb, ring->len );
  err_alloc_trb:
 	free ( ring->iobuf );
  err_alloc_iobuf:
@@ -1256,7 +1266,7 @@ static void xhci_ring_free ( struct xhci_trb_ring *ring ) {
 		assert ( ring->iobuf[i] == NULL );
 
 	/* Free TRBs */
-	free_phys ( ring->trb, ring->len );
+	dma_free ( &ring->map, ring->trb, ring->len );
 
 	/* Free I/O buffers */
 	free ( ring->iobuf );
@@ -1422,13 +1432,14 @@ static int xhci_command_alloc ( struct xhci_device *xhci ) {
 		goto err_ring_alloc;
 
 	/* Program command ring control register */
-	crp = virt_to_phys ( xhci->command.trb );
+	crp = dma ( &xhci->command.map, xhci->command.trb );
 	if ( ( rc = xhci_writeq ( xhci, ( crp | XHCI_CRCR_RCS ),
 				  xhci->op + XHCI_OP_CRCR ) ) != 0 )
 		goto err_writeq;
-	DBGC2 ( xhci, "XHCI %s CRCR at [%08lx,%08lx)\n",
-		xhci->name, crp, ( crp + xhci->command.len ) );
+	DBGC2 ( xhci, "XHCI %s CRCR at [%08lx,%08lx)\n", xhci->name,
+		virt_to_phys ( xhci->command.trb ),
+		( virt_to_phys ( xhci->command.trb ) + xhci->command.len ) );
 
 	return 0;
 
  err_writeq:
@@ -1469,7 +1480,8 @@ static int xhci_event_alloc ( struct xhci_device *xhci ) {
 	/* Allocate event ring */
 	count = ( 1 << XHCI_EVENT_TRBS_LOG2 );
 	len = ( count * sizeof ( event->trb[0] ) );
-	event->trb = malloc_phys ( len, xhci_align ( len ) );
+	event->trb = dma_alloc ( xhci->dma, &event->trb_map, len,
+				 xhci_align ( len ) );
 	if ( ! event->trb ) {
 		rc = -ENOMEM;
 		goto err_alloc_trb;
@@ -1477,22 +1489,25 @@ static int xhci_event_alloc ( struct xhci_device *xhci ) {
 	memset ( event->trb, 0, len );
 
 	/* Allocate event ring segment table */
-	event->segment = malloc_phys ( sizeof ( event->segment[0] ),
-				       xhci_align ( sizeof(event->segment[0])));
+	event->segment = dma_alloc ( xhci->dma, &event->segment_map,
+				     sizeof ( event->segment[0] ),
+				     xhci_align ( sizeof (event->segment[0])));
 	if ( ! event->segment ) {
 		rc = -ENOMEM;
 		goto err_alloc_segment;
 	}
 	memset ( event->segment, 0, sizeof ( event->segment[0] ) );
-	event->segment[0].base = cpu_to_le64 ( virt_to_phys ( event->trb ) );
+	event->segment[0].base = cpu_to_le64 ( dma ( &event->trb_map,
+						     event->trb ) );
 	event->segment[0].count = cpu_to_le32 ( count );
 
 	/* Program event ring registers */
 	writel ( 1, xhci->run + XHCI_RUN_ERSTSZ ( 0 ) );
-	if ( ( rc = xhci_writeq ( xhci, virt_to_phys ( event->trb ),
+	if ( ( rc = xhci_writeq ( xhci, dma ( &event->trb_map, event->trb ),
 				  xhci->run + XHCI_RUN_ERDP ( 0 ) ) ) != 0 )
 		goto err_writeq_erdp;
-	if ( ( rc = xhci_writeq ( xhci, virt_to_phys ( event->segment ),
+	if ( ( rc = xhci_writeq ( xhci,
+				  dma ( &event->segment_map, event->segment ),
 				  xhci->run + XHCI_RUN_ERSTBA ( 0 ) ) ) != 0 )
 		goto err_writeq_erstba;
 
@@ -1501,16 +1516,17 @@ static int xhci_event_alloc ( struct xhci_device *xhci ) {
 		( virt_to_phys ( event->trb ) + len ),
 		virt_to_phys ( event->segment ),
 		( virt_to_phys ( event->segment ) +
-		  sizeof (event->segment[0] ) ) );
+		  sizeof ( event->segment[0] ) ) );
 
 	return 0;
 
 	xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERSTBA ( 0 ) );
  err_writeq_erstba:
 	xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERDP ( 0 ) );
  err_writeq_erdp:
-	free_phys ( event->trb, len );
+	dma_free ( &event->segment_map, event->segment,
+		   sizeof ( event->segment[0] ) );
  err_alloc_segment:
-	free_phys ( event->segment, sizeof ( event->segment[0] ) );
+	dma_free ( &event->trb_map, event->trb, len );
  err_alloc_trb:
 	return rc;
 }
@@ -1531,12 +1547,13 @@ static void xhci_event_free ( struct xhci_device *xhci ) {
 	xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERDP ( 0 ) );
 
 	/* Free event ring segment table */
-	free_phys ( event->segment, sizeof ( event->segment[0] ) );
+	dma_free ( &event->segment_map, event->segment,
+		   sizeof ( event->segment[0] ) );
 
 	/* Free event ring */
 	count = ( 1 << XHCI_EVENT_TRBS_LOG2 );
 	len = ( count * sizeof ( event->trb[0] ) );
-	free_phys ( event->trb, len );
+	dma_free ( &event->trb_map, event->trb, len );
 }
 
 /**
@@ -1577,6 +1594,9 @@ static void xhci_transfer ( struct xhci_device *xhci,
 	iobuf = xhci_dequeue_multi ( &endpoint->ring );
 	assert ( iobuf != NULL );
 
+	/* Unmap I/O buffer */
+	iob_unmap ( iobuf );
+
 	/* Check for errors */
 	if ( ! ( ( trb->code == XHCI_CMPLT_SUCCESS ) ||
 		 ( trb->code == XHCI_CMPLT_SHORT ) ) ) {
@@ -1745,7 +1765,7 @@ static void xhci_event_poll ( struct xhci_device *xhci ) {
 
 	/* Update dequeue pointer if applicable */
 	if ( consumed ) {
-		xhci_writeq ( xhci, virt_to_phys ( trb ),
+		xhci_writeq ( xhci, dma ( &event->trb_map, trb ),
 			      xhci->run + XHCI_RUN_ERDP ( 0 ) );
 		profile_stop ( &xhci_event_profiler );
 	}
@@ -1774,7 +1794,7 @@ static void xhci_abort ( struct xhci_device *xhci ) {
 
 	/* Reset the command ring control register */
 	xhci_ring_reset ( &xhci->command );
-	crp = virt_to_phys ( xhci->command.trb );
+	crp = dma ( &xhci->command.map, xhci->command.trb );
 	xhci_writeq ( xhci, ( crp | XHCI_CRCR_RCS ), xhci->op + XHCI_OP_CRCR );
 }
@@ -1942,13 +1962,15 @@ static int xhci_context ( struct xhci_device *xhci, struct xhci_slot *slot,
 					      void *input ) ) {
 	union xhci_trb trb;
 	struct xhci_trb_context *context = &trb.context;
+	struct dma_mapping map;
 	size_t len;
 	void *input;
 	int rc;
 
 	/* Allocate an input context */
+	memset ( &map, 0, sizeof ( map ) );
 	len = xhci_input_context_offset ( xhci, XHCI_CTX_END );
-	input = malloc_phys ( len, xhci_align ( len ) );
+	input = dma_alloc ( xhci->dma, &map, len, xhci_align ( len ) );
 	if ( ! input ) {
 		rc = -ENOMEM;
 		goto err_alloc;
@@ -1961,7 +1983,7 @@ static int xhci_context ( struct xhci_device *xhci, struct xhci_slot *slot,
 	/* Construct command */
 	memset ( context, 0, sizeof ( *context ) );
 	context->type = type;
-	context->input = cpu_to_le64 ( virt_to_phys ( input ) );
+	context->input = cpu_to_le64 ( dma ( &map, input ) );
 	context->slot = slot->id;
 
 	/* Issue command and wait for completion */
@@ -1969,7 +1991,7 @@ static int xhci_context ( struct xhci_device *xhci, struct xhci_slot *slot,
 		goto err_command;
 
  err_command:
-	free_phys ( input, len );
+	dma_free ( &map, input, len );
  err_alloc:
 	return rc;
 }
@@ -1986,6 +2008,7 @@ static void xhci_address_device_input ( struct xhci_device *xhci,
 					struct xhci_slot *slot,
 					struct xhci_endpoint *endpoint,
 					void *input ) {
+	struct xhci_trb_ring *ring = &endpoint->ring;
 	struct xhci_control_context *control_ctx;
 	struct xhci_slot_context *slot_ctx;
 	struct xhci_endpoint_context *ep_ctx;
@@ -2011,7 +2034,7 @@ static void xhci_address_device_input ( struct xhci_device *xhci,
 	ep_ctx->type = XHCI_EP_TYPE_CONTROL;
 	ep_ctx->burst = endpoint->ep->burst;
 	ep_ctx->mtu = cpu_to_le16 ( endpoint->ep->mtu );
-	ep_ctx->dequeue = cpu_to_le64 ( virt_to_phys ( endpoint->ring.trb ) |
+	ep_ctx->dequeue = cpu_to_le64 ( dma ( &ring->map, ring->trb ) |
 					XHCI_EP_DCS );
 	ep_ctx->trb_len = cpu_to_le16 ( XHCI_EP0_TRB_LEN );
 }
@@ -2057,6 +2080,7 @@ static void xhci_configure_endpoint_input ( struct xhci_device *xhci,
 					    struct xhci_slot *slot,
 					    struct xhci_endpoint *endpoint,
 					    void *input ) {
+	struct xhci_trb_ring *ring = &endpoint->ring;
 	struct xhci_control_context *control_ctx;
 	struct xhci_slot_context *slot_ctx;
 	struct xhci_endpoint_context *ep_ctx;
@@ -2079,7 +2103,7 @@ static void xhci_configure_endpoint_input ( struct xhci_device *xhci,
 	ep_ctx->type = endpoint->type;
 	ep_ctx->burst = endpoint->ep->burst;
 	ep_ctx->mtu = cpu_to_le16 ( endpoint->ep->mtu );
-	ep_ctx->dequeue = cpu_to_le64 ( virt_to_phys ( endpoint->ring.trb ) |
+	ep_ctx->dequeue = cpu_to_le64 ( dma ( &ring->map, ring->trb ) |
 					XHCI_EP_DCS );
 	ep_ctx->trb_len = cpu_to_le16 ( endpoint->ep->mtu ); /* best guess */
 }
@@ -2297,6 +2321,7 @@ xhci_set_tr_dequeue_pointer ( struct xhci_device *xhci,
 	unsigned int mask;
 	unsigned int index;
 	unsigned int dcs;
+	physaddr_t addr;
 	int rc;
 
 	/* Construct command */
@@ -2305,8 +2330,8 @@ xhci_set_tr_dequeue_pointer ( struct xhci_device *xhci,
 	mask = ring->mask;
 	dcs = ( ( ~( cons >> ring->shift ) ) & XHCI_EP_DCS );
 	index = ( cons & mask );
-	dequeue->dequeue =
-		cpu_to_le64 ( virt_to_phys ( &ring->trb[index] ) | dcs );
+	addr = dma ( &ring->map, &ring->trb[index] );
+	dequeue->dequeue = cpu_to_le64 ( addr | dcs );
 	dequeue->slot = slot->id;
 	dequeue->endpoint = endpoint->ctx;
 	dequeue->type = XHCI_TRB_SET_TR_DEQUEUE_POINTER;
@@ -2425,6 +2450,7 @@ static void xhci_endpoint_close ( struct usb_endpoint *ep ) {
 	/* Cancel any incomplete transfers */
 	while ( xhci_ring_fill ( &endpoint->ring ) ) {
 		iobuf = xhci_dequeue_multi ( &endpoint->ring );
+		iob_unmap ( iobuf );
 		usb_complete_err ( ep, iobuf, -ECANCELED );
 	}
@@ -2491,6 +2517,7 @@ static int xhci_endpoint_mtu ( struct usb_endpoint *ep ) {
 static int xhci_endpoint_message ( struct usb_endpoint *ep,
 				   struct io_buffer *iobuf ) {
 	struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
+	struct xhci_device *xhci = endpoint->xhci;
 	struct usb_setup_packet *packet;
 	unsigned int input;
 	size_t len;
@@ -2520,10 +2547,15 @@ static int xhci_endpoint_message ( struct usb_endpoint *ep,
 	if ( len )
 		setup->direction = ( input ? XHCI_SETUP_IN : XHCI_SETUP_OUT );
 
+	/* Map I/O buffer */
+	if ( ( rc = iob_map ( iobuf, xhci->dma, len,
+			      ( input ? DMA_RX : DMA_TX ) ) ) != 0 )
+		goto err_map;
+
 	/* Construct data stage TRB, if applicable */
 	if ( len ) {
 		data = &(trb++)->data;
-		data->data = cpu_to_le64 ( virt_to_phys ( iobuf->data ) );
+		data->data = cpu_to_le64 ( iob_dma ( iobuf ) );
 		data->len = cpu_to_le32 ( len );
 		data->type = XHCI_TRB_DATA;
 		data->direction = ( input ? XHCI_DATA_IN : XHCI_DATA_OUT );
@@ -2539,13 +2571,18 @@ static int xhci_endpoint_message ( struct usb_endpoint *ep,
 	/* Enqueue TRBs */
 	if ( ( rc = xhci_enqueue_multi ( &endpoint->ring, iobuf, trbs,
 					 ( trb - trbs ) ) ) != 0 )
-		return rc;
+		goto err_enqueue;
 
 	/* Ring the doorbell */
 	xhci_doorbell ( &endpoint->ring );
 
 	profile_stop ( &xhci_message_profiler );
 	return 0;
+
+ err_enqueue:
+	iob_unmap ( iobuf );
+ err_map:
+	return rc;
 }
 
 /**
@@ -2579,12 +2616,13 @@ static unsigned int xhci_endpoint_count ( size_t len, int zlp ) {
 static int xhci_endpoint_stream ( struct usb_endpoint *ep,
 				  struct io_buffer *iobuf, int zlp ) {
 	struct xhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
-	void *data = iobuf->data;
+	struct xhci_device *xhci = endpoint->xhci;
 	size_t len = iob_len ( iobuf );
 	unsigned int count = xhci_endpoint_count ( len, zlp );
 	union xhci_trb trbs[count];
 	union xhci_trb *trb = trbs;
 	struct xhci_trb_normal *normal;
+	physaddr_t data;
 	unsigned int i;
 	size_t trb_len;
 	int rc;
@@ -2592,6 +2630,13 @@ static int xhci_endpoint_stream ( struct usb_endpoint *ep,
 	/* Profile stream transfers */
 	profile_start ( &xhci_stream_profiler );
 
+	/* Map I/O buffer */
+	if ( ( rc = iob_map ( iobuf, xhci->dma, len,
+			      ( ( ep->address & USB_DIR_IN ) ?
+				DMA_RX : DMA_TX ) ) ) != 0 )
+		goto err_map;
+	data = iob_dma ( iobuf );
+
 	/* Construct normal TRBs */
 	memset ( &trbs, 0, sizeof ( trbs ) );
 	for ( i = 0 ; i < count ; i ++ ) {
@@ -2603,7 +2648,7 @@ static int xhci_endpoint_stream ( struct usb_endpoint *ep,
 		/* Construct normal TRB */
 		normal = &trb->normal;
-		normal->data = cpu_to_le64 ( virt_to_phys ( data ) );
+		normal->data = cpu_to_le64 ( data );
 		normal->len = cpu_to_le32 ( trb_len );
 		normal->type = XHCI_TRB_NORMAL;
 		normal->flags = XHCI_TRB_CH;
@@ -2624,13 +2669,18 @@ static int xhci_endpoint_stream ( struct usb_endpoint *ep,
 	/* Enqueue TRBs */
 	if ( ( rc = xhci_enqueue_multi ( &endpoint->ring, iobuf, trbs,
 					 count ) ) != 0 )
-		return rc;
+		goto err_enqueue;
 
 	/* Ring the doorbell */
 	xhci_doorbell ( &endpoint->ring );
 
 	profile_stop ( &xhci_stream_profiler );
 	return 0;
+
+ err_enqueue:
+	iob_unmap ( iobuf );
+ err_map:
+	return rc;
 }
 
 /******************************************************************************
@@ -2693,7 +2743,8 @@ static int xhci_device_open ( struct usb_device *usb ) {
 
 	/* Allocate a device context */
 	len = xhci_device_context_offset ( xhci, XHCI_CTX_END );
-	slot->context = malloc_phys ( len, xhci_align ( len ) );
+	slot->context = dma_alloc ( xhci->dma, &slot->map, len,
+				    xhci_align ( len ) );
 	if ( ! slot->context ) {
 		rc = -ENOMEM;
 		goto err_alloc_context;
@@ -2701,16 +2752,17 @@ static int xhci_device_open ( struct usb_device *usb ) {
 	memset ( slot->context, 0, len );
 
 	/* Set device context base address */
-	assert ( xhci->dcbaa[id] == 0 );
-	xhci->dcbaa[id] = cpu_to_le64 ( virt_to_phys ( slot->context ) );
+	assert ( xhci->dcbaa.context[id] == 0 );
+	xhci->dcbaa.context[id] = cpu_to_le64 ( dma ( &slot->map,
+						      slot->context ) );
 
 	DBGC2 ( xhci, "XHCI %s slot %d device context [%08lx,%08lx) for %s\n",
 		xhci->name, slot->id, virt_to_phys ( slot->context ),
 		( virt_to_phys ( slot->context ) + len ), usb->name );
 	return 0;
 
-	xhci->dcbaa[id] = 0;
-	free_phys ( slot->context, len );
+	xhci->dcbaa.context[id] = 0;
+	dma_free ( &slot->map, slot->context, len );
  err_alloc_context:
 	xhci->slot[id] = NULL;
 	free ( slot );
@@ -2750,8 +2802,8 @@ static void xhci_device_close ( struct usb_device *usb ) {
 
 	/* Free slot */
 	if ( slot->context ) {
-		free_phys ( slot->context, len );
-		xhci->dcbaa[id] = 0;
+		dma_free ( &slot->map, slot->context, len );
+		xhci->dcbaa.context[id] = 0;
 	}
 	xhci->slot[id] = NULL;
 	free ( slot );
@@ -3270,6 +3322,11 @@ static int xhci_probe ( struct pci_device *pci ) {
 	/* Initialise xHCI device */
 	xhci_init ( xhci, xhci->regs );
 
+	/* Configure DMA device */
+	xhci->dma = &pci->dma;
+	if ( xhci->addr64 )
+		dma_set_mask_64bit ( xhci->dma );
+
 	/* Initialise USB legacy support and claim ownership */
 	xhci_legacy_init ( xhci );
 	xhci_legacy_claim ( xhci );
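
For transfer data the driver now uses streaming mappings: xhci_endpoint_message() and xhci_endpoint_stream() map the I/O buffer immediately before enqueueing its TRBs, and xhci_transfer() and xhci_endpoint_close() unmap it on completion or cancellation. A condensed sketch of that lifecycle, with the surrounding TRB construction elided and DMA_TX shown for the output direction:

	/* Map the I/O buffer for the transfer direction */
	if ( ( rc = iob_map ( iobuf, xhci->dma, iob_len ( iobuf ),
			      DMA_TX ) ) != 0 )
		goto err_map;

	/* TRBs carry the device-visible address of the mapped data */
	normal->data = cpu_to_le64 ( iob_dma ( iobuf ) );

	/* Enqueue; unmap on failure, otherwise at completion time */
	if ( ( rc = xhci_enqueue_multi ( &endpoint->ring, iobuf, trbs,
					 count ) ) != 0 )
		goto err_enqueue;
	return 0;

 err_enqueue:
	iob_unmap ( iobuf );
 err_map:
	return rc;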

src/drivers/usb/xhci.h

@@ -854,6 +854,8 @@ struct xhci_trb_ring {
 	union xhci_trb *trb;
 	/** Length of transfer request blocks */
 	size_t len;
+	/** DMA mapping */
+	struct dma_mapping map;
 	/** Link TRB (if applicable) */
 	struct xhci_trb_link *link;
@@ -869,8 +871,12 @@ struct xhci_event_ring {
 	unsigned int cons;
 	/** Event ring segment table */
 	struct xhci_event_ring_segment *segment;
+	/** Event ring segment table DMA mapping */
+	struct dma_mapping segment_map;
 	/** Transfer request blocks */
 	union xhci_trb *trb;
+	/** Transfer request blocks DMA mapping */
+	struct dma_mapping trb_map;
 };
 
 /**
@@ -1035,10 +1041,34 @@ struct xhci_pch {
 /** Invalid protocol speed ID values quirk */
 #define XHCI_BAD_PSIV 0x0002
 
+/** Device context base address array */
+struct xhci_dcbaa {
+	/** Context base addresses */
+	uint64_t *context;
+	/** DMA mapping */
+	struct dma_mapping map;
+};
+
+/** Scratchpad buffer */
+struct xhci_scratchpad {
+	/** Number of page-sized scratchpad buffers */
+	unsigned int count;
+	/** Scratchpad buffer area */
+	userptr_t buffer;
+	/** Buffer DMA mapping */
+	struct dma_mapping buffer_map;
+	/** Scratchpad array */
+	uint64_t *array;
+	/** Array DMA mapping */
+	struct dma_mapping array_map;
+};
+
 /** An xHCI device */
 struct xhci_device {
 	/** Registers */
 	void *regs;
+	/** DMA device */
+	struct dma_device *dma;
 	/** Name */
 	const char *name;
 	/** Quirks */
@@ -1060,9 +1090,6 @@ struct xhci_device {
 	/** Number of ports */
 	unsigned int ports;
 
-	/** Number of page-sized scratchpad buffers */
-	unsigned int scratchpads;
-
 	/** 64-bit addressing capability */
 	int addr64;
 	/** Context size shift */
@@ -1077,12 +1104,10 @@ struct xhci_device {
 	unsigned int legacy;
 
 	/** Device context base address array */
-	uint64_t *dcbaa;
+	struct xhci_dcbaa dcbaa;
 
-	/** Scratchpad buffer area */
-	userptr_t scratchpad;
-	/** Scratchpad buffer array */
-	uint64_t *scratchpad_array;
+	/** Scratchpad buffer */
+	struct xhci_scratchpad scratch;
 
 	/** Command ring */
 	struct xhci_trb_ring command;
@@ -1111,6 +1136,8 @@ struct xhci_slot {
 	unsigned int id;
 	/** Slot context */
 	struct xhci_slot_context *context;
+	/** DMA mapping */
+	struct dma_mapping map;
 	/** Route string */
 	unsigned int route;
 	/** Root hub port number */
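
Each new structure in the header pairs a buffer with its struct dma_mapping, so the device-visible address can be recomputed wherever it is needed. The scratchpad is the one case that mixes both allocators: the page-sized buffers come from dma_umalloc() and their DMA addresses are derived with dma_phys(), while the 64-bit pointer array is an ordinary dma_alloc(). A condensed sketch, taken from xhci_scratchpad_alloc() above with error handling elided:

	struct xhci_scratchpad *scratch = &xhci->scratch;

	scratch->buffer = dma_umalloc ( xhci->dma, &scratch->buffer_map,
					buffer_len, xhci->pagesize );
	scratch->array = dma_alloc ( xhci->dma, &scratch->array_map,
				     array_len, xhci_align ( array_len ) );

	/* Each array entry holds the DMA address of one scratchpad page */
	addr = dma_phys ( &scratch->buffer_map,
			  user_to_phys ( scratch->buffer, 0 ) );
	for ( i = 0 ; i < scratch->count ; i++ ) {
		scratch->array[i] = cpu_to_le64 ( addr );
		addr += xhci->pagesize;
	}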

src/include/ipxe/iobuf.h

@@ -214,6 +214,22 @@ static inline void iob_populate ( struct io_buffer *iobuf,
 	(iobuf) = NULL; \
 	__iobuf; } )
 
+/**
+ * Map I/O buffer for DMA
+ *
+ * @v iobuf		I/O buffer
+ * @v dma		DMA device
+ * @v len		Length to map
+ * @v flags		Mapping flags
+ * @ret rc		Return status code
+ */
+static inline __always_inline int iob_map ( struct io_buffer *iobuf,
+					    struct dma_device *dma,
+					    size_t len, int flags ) {
+	return dma_map ( dma, &iobuf->map, virt_to_phys ( iobuf->data ),
+			 len, flags );
+}
+
 /**
  * Map I/O buffer for transmit DMA
  *
@@ -223,8 +239,7 @@ static inline void iob_populate ( struct io_buffer *iobuf,
  */
 static inline __always_inline int iob_map_tx ( struct io_buffer *iobuf,
 					       struct dma_device *dma ) {
-	return dma_map ( dma, &iobuf->map, virt_to_phys ( iobuf->data ),
-			 iob_len ( iobuf ), DMA_TX );
+	return iob_map ( iobuf, dma, iob_len ( iobuf ), DMA_TX );
 }
 
 /**
@@ -237,8 +252,7 @@ static inline __always_inline int iob_map_tx ( struct io_buffer *iobuf,
 static inline __always_inline int iob_map_rx ( struct io_buffer *iobuf,
 					       struct dma_device *dma ) {
 	assert ( iob_len ( iobuf ) == 0 );
-	return dma_map ( dma, &iobuf->map, virt_to_phys ( iobuf->data ),
-			 iob_tailroom ( iobuf ), DMA_RX );
+	return iob_map ( iobuf, dma, iob_tailroom ( iobuf ), DMA_RX );
 }
 
 /**
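
The generic iob_map() exists because xHCI control transfers need a mapping whose length and direction come from the setup packet rather than from the buffer itself; iob_map_tx() and iob_map_rx() become thin wrappers around it. A short usage sketch mirroring the control-transfer path in xhci.c above (the `input` flag and `len` are taken from that code):

	/* Map the data stage for the direction given by the setup packet */
	if ( ( rc = iob_map ( iobuf, xhci->dma, len,
			      ( input ? DMA_RX : DMA_TX ) ) ) != 0 )
		return rc;

	/* Use the device-visible address, then unmap when done */
	data->data = cpu_to_le64 ( iob_dma ( iobuf ) );
	iob_unmap ( iobuf );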