mirror of https://github.com/ipxe/ipxe.git
[malloc] Rename malloc_dma() to malloc_phys()
The malloc_dma() function allocates memory with specified physical alignment,
and is typically (though not exclusively) used to allocate memory for DMA.
Rename to malloc_phys() to more closely match the functionality, and to create
name space for functions that specifically allocate and map DMA-capable
buffers.

Signed-off-by: Michael Brown <mcb30@ipxe.org>
branch: pull/171/head
parent 36dde9b0bf
commit be1c87b722
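As a quick illustration of what the rename means for callers (a minimal sketch, not taken from the tree; the example_ring structure, field names and helper functions below are hypothetical), code that previously paired malloc_dma()/free_dma() now pairs malloc_phys()/free_phys() with unchanged arguments and semantics:

#include <errno.h>
#include <stddef.h>
#include <ipxe/malloc.h>

/* Hypothetical caller, for illustration only: malloc_phys()/free_phys()
 * take the same ( size, physical alignment ) and ( pointer, size )
 * arguments as the old malloc_dma()/free_dma().
 */
struct example_ring {
	void *desc;
	size_t len;
};

static int example_ring_alloc ( struct example_ring *ring, size_t len ) {
	ring->desc = malloc_phys ( len, len );	/* was malloc_dma ( len, len ) */
	if ( ! ring->desc )
		return -ENOMEM;
	ring->len = len;
	return 0;
}

static void example_ring_free ( struct example_ring *ring ) {
	free_phys ( ring->desc, ring->len );	/* was free_dma ( ring->desc, ring->len ) */
	ring->desc = NULL;
}

The diff below shows the mechanical rename applied across the tree.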
@@ -83,7 +83,7 @@ hv_alloc_pages ( struct hv_hypervisor *hv, ... ) {
 /* Allocate and zero pages */
 va_start ( args, hv );
 for ( i = 0 ; ( ( page = va_arg ( args, void ** ) ) != NULL ); i++ ) {
-*page = malloc_dma ( PAGE_SIZE, PAGE_SIZE );
+*page = malloc_phys ( PAGE_SIZE, PAGE_SIZE );
 if ( ! *page )
 goto err_alloc;
 memset ( *page, 0, PAGE_SIZE );
@@ -97,7 +97,7 @@ hv_alloc_pages ( struct hv_hypervisor *hv, ... ) {
 va_start ( args, hv );
 for ( ; i >= 0 ; i-- ) {
 page = va_arg ( args, void ** );
-free_dma ( *page, PAGE_SIZE );
+free_phys ( *page, PAGE_SIZE );
 }
 va_end ( args );
 return -ENOMEM;
@@ -116,7 +116,7 @@ hv_free_pages ( struct hv_hypervisor *hv, ... ) {

 va_start ( args, hv );
 while ( ( page = va_arg ( args, void * ) ) != NULL )
-free_dma ( page, PAGE_SIZE );
+free_phys ( page, PAGE_SIZE );
 va_end ( args );
 }

@@ -131,8 +131,8 @@ static int hv_alloc_message ( struct hv_hypervisor *hv ) {
 /* Allocate buffer.  Must be aligned to at least 8 bytes and
 * must not cross a page boundary, so align on its own size.
 */
-hv->message = malloc_dma ( sizeof ( *hv->message ),
-sizeof ( *hv->message ) );
+hv->message = malloc_phys ( sizeof ( *hv->message ),
+sizeof ( *hv->message ) );
 if ( ! hv->message )
 return -ENOMEM;
@@ -147,7 +147,7 @@ static int hv_alloc_message ( struct hv_hypervisor *hv ) {
 static void hv_free_message ( struct hv_hypervisor *hv ) {

 /* Free buffer */
-free_dma ( hv->message, sizeof ( *hv->message ) );
+free_phys ( hv->message, sizeof ( *hv->message ) );
 }

 /**

@@ -106,7 +106,7 @@ static int hvm_map_hypercall ( struct hvm_device *hvm ) {

 /* Allocate pages */
 hvm->hypercall_len = ( pages * PAGE_SIZE );
-hvm->xen.hypercall = malloc_dma ( hvm->hypercall_len, PAGE_SIZE );
+hvm->xen.hypercall = malloc_phys ( hvm->hypercall_len, PAGE_SIZE );
 if ( ! hvm->xen.hypercall ) {
 DBGC ( hvm, "HVM could not allocate %d hypercall page(s)\n",
 pages );
@@ -141,7 +141,7 @@ static int hvm_map_hypercall ( struct hvm_device *hvm ) {
 static void hvm_unmap_hypercall ( struct hvm_device *hvm ) {

 /* Free pages */
-free_dma ( hvm->xen.hypercall, hvm->hypercall_len );
+free_phys ( hvm->xen.hypercall, hvm->hypercall_len );
 }

 /**

@@ -88,8 +88,8 @@ struct io_buffer * alloc_iob_raw ( size_t len, size_t align, size_t offset ) {
 len += ( ( - len - offset ) & ( __alignof__ ( *iobuf ) - 1 ) );

 /* Allocate memory for buffer plus descriptor */
-data = malloc_dma_offset ( len + sizeof ( *iobuf ), align,
-offset );
+data = malloc_phys_offset ( len + sizeof ( *iobuf ), align,
+offset );
 if ( ! data )
 return NULL;
 iobuf = ( data + len );
@@ -97,14 +97,14 @@ struct io_buffer * alloc_iob_raw ( size_t len, size_t align, size_t offset ) {
 } else {

 /* Allocate memory for buffer */
-data = malloc_dma_offset ( len, align, offset );
+data = malloc_phys_offset ( len, align, offset );
 if ( ! data )
 return NULL;

 /* Allocate memory for descriptor */
 iobuf = malloc ( sizeof ( *iobuf ) );
 if ( ! iobuf ) {
-free_dma ( data, len );
+free_phys ( data, len );
 return NULL;
 }
 }
@@ -159,12 +159,12 @@ void free_iob ( struct io_buffer *iobuf ) {
 if ( iobuf->end == iobuf ) {

 /* Descriptor is inline */
-free_dma ( iobuf->head, ( len + sizeof ( *iobuf ) ) );
+free_phys ( iobuf->head, ( len + sizeof ( *iobuf ) ) );

 } else {

 /* Descriptor is detached */
-free_dma ( iobuf->head, len );
+free_phys ( iobuf->head, len );
 free ( iobuf );
 }
 }

@@ -596,8 +596,8 @@ void * malloc ( size_t size ) {
 *
 * @v ptr		Memory allocated by malloc(), or NULL
 *
- * Memory allocated with malloc_dma() cannot be freed with free(); it
- * must be freed with free_dma() instead.
+ * Memory allocated with malloc_phys() cannot be freed with free(); it
+ * must be freed with free_phys() instead.
 *
 * If @c ptr is NULL, no action is taken.
 */

@@ -639,8 +639,8 @@ static int arbel_create_cq ( struct ib_device *ibdev,

 /* Allocate completion queue itself */
 arbel_cq->cqe_size = ( cq->num_cqes * sizeof ( arbel_cq->cqe[0] ) );
-arbel_cq->cqe = malloc_dma ( arbel_cq->cqe_size,
-sizeof ( arbel_cq->cqe[0] ) );
+arbel_cq->cqe = malloc_phys ( arbel_cq->cqe_size,
+sizeof ( arbel_cq->cqe[0] ) );
 if ( ! arbel_cq->cqe ) {
 rc = -ENOMEM;
 goto err_cqe;
@@ -697,7 +697,7 @@ static int arbel_create_cq ( struct ib_device *ibdev,
 err_sw2hw_cq:
 MLX_FILL_1 ( ci_db_rec, 1, res, ARBEL_UAR_RES_NONE );
 MLX_FILL_1 ( arm_db_rec, 1, res, ARBEL_UAR_RES_NONE );
-free_dma ( arbel_cq->cqe, arbel_cq->cqe_size );
+free_phys ( arbel_cq->cqe, arbel_cq->cqe_size );
 err_cqe:
 free ( arbel_cq );
 err_arbel_cq:
@@ -737,7 +737,7 @@ static void arbel_destroy_cq ( struct ib_device *ibdev,
 MLX_FILL_1 ( arm_db_rec, 1, res, ARBEL_UAR_RES_NONE );

 /* Free memory */
-free_dma ( arbel_cq->cqe, arbel_cq->cqe_size );
+free_phys ( arbel_cq->cqe, arbel_cq->cqe_size );
 free ( arbel_cq );

 /* Mark queue number as free */
@@ -873,8 +873,8 @@ static int arbel_create_send_wq ( struct arbel_send_work_queue *arbel_send_wq,
 /* Allocate work queue */
 arbel_send_wq->wqe_size = ( num_wqes *
 sizeof ( arbel_send_wq->wqe[0] ) );
-arbel_send_wq->wqe = malloc_dma ( arbel_send_wq->wqe_size,
-sizeof ( arbel_send_wq->wqe[0] ) );
+arbel_send_wq->wqe = malloc_phys ( arbel_send_wq->wqe_size,
+sizeof ( arbel_send_wq->wqe[0] ) );
 if ( ! arbel_send_wq->wqe )
 return -ENOMEM;
 memset ( arbel_send_wq->wqe, 0, arbel_send_wq->wqe_size );
@@ -914,8 +914,8 @@ static int arbel_create_recv_wq ( struct arbel_recv_work_queue *arbel_recv_wq,
 /* Allocate work queue */
 arbel_recv_wq->wqe_size = ( num_wqes *
 sizeof ( arbel_recv_wq->wqe[0] ) );
-arbel_recv_wq->wqe = malloc_dma ( arbel_recv_wq->wqe_size,
-sizeof ( arbel_recv_wq->wqe[0] ) );
+arbel_recv_wq->wqe = malloc_phys ( arbel_recv_wq->wqe_size,
+sizeof ( arbel_recv_wq->wqe[0] ) );
 if ( ! arbel_recv_wq->wqe ) {
 rc = -ENOMEM;
 goto err_alloc_wqe;
@@ -927,8 +927,8 @@ static int arbel_create_recv_wq ( struct arbel_recv_work_queue *arbel_recv_wq,
 ( type == IB_QPT_UD ) ) {
 arbel_recv_wq->grh_size = ( num_wqes *
 sizeof ( arbel_recv_wq->grh[0] ) );
-arbel_recv_wq->grh = malloc_dma ( arbel_recv_wq->grh_size,
-sizeof ( void * ) );
+arbel_recv_wq->grh = malloc_phys ( arbel_recv_wq->grh_size,
+sizeof ( void * ) );
 if ( ! arbel_recv_wq->grh ) {
 rc = -ENOMEM;
 goto err_alloc_grh;
@@ -954,9 +954,9 @@ static int arbel_create_recv_wq ( struct arbel_recv_work_queue *arbel_recv_wq,

 return 0;

-free_dma ( arbel_recv_wq->grh, arbel_recv_wq->grh_size );
+free_phys ( arbel_recv_wq->grh, arbel_recv_wq->grh_size );
 err_alloc_grh:
-free_dma ( arbel_recv_wq->wqe, arbel_recv_wq->wqe_size );
+free_phys ( arbel_recv_wq->wqe, arbel_recv_wq->wqe_size );
 err_alloc_wqe:
 return rc;
 }
@@ -1102,10 +1102,10 @@ static int arbel_create_qp ( struct ib_device *ibdev,
 MLX_FILL_1 ( send_db_rec, 1, res, ARBEL_UAR_RES_NONE );
 MLX_FILL_1 ( recv_db_rec, 1, res, ARBEL_UAR_RES_NONE );
 err_unsupported_address_split:
-free_dma ( arbel_qp->recv.grh, arbel_qp->recv.grh_size );
-free_dma ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size );
+free_phys ( arbel_qp->recv.grh, arbel_qp->recv.grh_size );
+free_phys ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size );
 err_create_recv_wq:
-free_dma ( arbel_qp->send.wqe, arbel_qp->send.wqe_size );
+free_phys ( arbel_qp->send.wqe, arbel_qp->send.wqe_size );
 err_create_send_wq:
 free ( arbel_qp );
 err_arbel_qp:
@@ -1231,9 +1231,9 @@ static void arbel_destroy_qp ( struct ib_device *ibdev,
 MLX_FILL_1 ( recv_db_rec, 1, res, ARBEL_UAR_RES_NONE );

 /* Free memory */
-free_dma ( arbel_qp->recv.grh, arbel_qp->recv.grh_size );
-free_dma ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size );
-free_dma ( arbel_qp->send.wqe, arbel_qp->send.wqe_size );
+free_phys ( arbel_qp->recv.grh, arbel_qp->recv.grh_size );
+free_phys ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size );
+free_phys ( arbel_qp->send.wqe, arbel_qp->send.wqe_size );
 free ( arbel_qp );

 /* Mark queue number as free */
@@ -1758,8 +1758,8 @@ static int arbel_create_eq ( struct arbel *arbel ) {
 /* Allocate event queue itself */
 arbel_eq->eqe_size =
 ( ARBEL_NUM_EQES * sizeof ( arbel_eq->eqe[0] ) );
-arbel_eq->eqe = malloc_dma ( arbel_eq->eqe_size,
-sizeof ( arbel_eq->eqe[0] ) );
+arbel_eq->eqe = malloc_phys ( arbel_eq->eqe_size,
+sizeof ( arbel_eq->eqe[0] ) );
 if ( ! arbel_eq->eqe ) {
 rc = -ENOMEM;
 goto err_eqe;
@@ -1806,7 +1806,7 @@ static int arbel_create_eq ( struct arbel *arbel ) {
 err_map_eq:
 arbel_cmd_hw2sw_eq ( arbel, arbel_eq->eqn, &eqctx );
 err_sw2hw_eq:
-free_dma ( arbel_eq->eqe, arbel_eq->eqe_size );
+free_phys ( arbel_eq->eqe, arbel_eq->eqe_size );
 err_eqe:
 memset ( arbel_eq, 0, sizeof ( *arbel_eq ) );
 return rc;
@@ -1844,7 +1844,7 @@ static void arbel_destroy_eq ( struct arbel *arbel ) {
 }

 /* Free memory */
-free_dma ( arbel_eq->eqe, arbel_eq->eqe_size );
+free_phys ( arbel_eq->eqe, arbel_eq->eqe_size );
 memset ( arbel_eq, 0, sizeof ( *arbel_eq ) );
 }

@@ -2455,7 +2455,7 @@ static int arbel_alloc_icm ( struct arbel *arbel,
 icm_phys = user_to_phys ( arbel->icm, 0 );

 /* Allocate doorbell UAR */
-arbel->db_rec = malloc_dma ( ARBEL_PAGE_SIZE, ARBEL_PAGE_SIZE );
+arbel->db_rec = malloc_phys ( ARBEL_PAGE_SIZE, ARBEL_PAGE_SIZE );
 if ( ! arbel->db_rec ) {
 rc = -ENOMEM;
 goto err_alloc_doorbell;
@@ -2513,7 +2513,7 @@ static int arbel_alloc_icm ( struct arbel *arbel,
 err_map_icm:
 arbel_cmd_unmap_icm_aux ( arbel );
 err_map_icm_aux:
-free_dma ( arbel->db_rec, ARBEL_PAGE_SIZE );
+free_phys ( arbel->db_rec, ARBEL_PAGE_SIZE );
 arbel->db_rec= NULL;
 err_alloc_doorbell:
 err_alloc_icm:
@@ -2536,7 +2536,7 @@ static void arbel_free_icm ( struct arbel *arbel ) {
 arbel_cmd_unmap_icm ( arbel, ( arbel->icm_len / ARBEL_PAGE_SIZE ),
 &unmap_icm );
 arbel_cmd_unmap_icm_aux ( arbel );
-free_dma ( arbel->db_rec, ARBEL_PAGE_SIZE );
+free_phys ( arbel->db_rec, ARBEL_PAGE_SIZE );
 arbel->db_rec = NULL;
 }

@@ -2984,18 +2984,18 @@ static struct arbel * arbel_alloc ( void ) {
 goto err_arbel;

 /* Allocate space for mailboxes */
-arbel->mailbox_in = malloc_dma ( ARBEL_MBOX_SIZE, ARBEL_MBOX_ALIGN );
+arbel->mailbox_in = malloc_phys ( ARBEL_MBOX_SIZE, ARBEL_MBOX_ALIGN );
 if ( ! arbel->mailbox_in )
 goto err_mailbox_in;
-arbel->mailbox_out = malloc_dma ( ARBEL_MBOX_SIZE, ARBEL_MBOX_ALIGN );
+arbel->mailbox_out = malloc_phys ( ARBEL_MBOX_SIZE, ARBEL_MBOX_ALIGN );
 if ( ! arbel->mailbox_out )
 goto err_mailbox_out;

 return arbel;

-free_dma ( arbel->mailbox_out, ARBEL_MBOX_SIZE );
+free_phys ( arbel->mailbox_out, ARBEL_MBOX_SIZE );
 err_mailbox_out:
-free_dma ( arbel->mailbox_in, ARBEL_MBOX_SIZE );
+free_phys ( arbel->mailbox_in, ARBEL_MBOX_SIZE );
 err_mailbox_in:
 free ( arbel );
 err_arbel:
@@ -3011,8 +3011,8 @@ static void arbel_free ( struct arbel *arbel ) {

 ufree ( arbel->icm );
 ufree ( arbel->firmware_area );
-free_dma ( arbel->mailbox_out, ARBEL_MBOX_SIZE );
-free_dma ( arbel->mailbox_in, ARBEL_MBOX_SIZE );
+free_phys ( arbel->mailbox_out, ARBEL_MBOX_SIZE );
+free_phys ( arbel->mailbox_in, ARBEL_MBOX_SIZE );
 free ( arbel );
 }

@@ -585,9 +585,9 @@ static inline int golan_set_access_reg ( struct golan *golan __attribute__ (( un

 static inline void golan_cmd_uninit ( struct golan *golan )
 {
-free_dma(golan->mboxes.outbox, GOLAN_PAGE_SIZE);
-free_dma(golan->mboxes.inbox, GOLAN_PAGE_SIZE);
-free_dma(golan->cmd.addr, GOLAN_PAGE_SIZE);
+free_phys(golan->mboxes.outbox, GOLAN_PAGE_SIZE);
+free_phys(golan->mboxes.inbox, GOLAN_PAGE_SIZE);
+free_phys(golan->cmd.addr, GOLAN_PAGE_SIZE);
 }

 /**
@@ -602,17 +602,17 @@ static inline int golan_cmd_init ( struct golan *golan )
 int rc = 0;
 uint32_t addr_l_sz;

-if (!(golan->cmd.addr = malloc_dma(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) {
+if (!(golan->cmd.addr = malloc_phys(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) {
 rc = -ENOMEM;
-goto malloc_dma_failed;
+goto malloc_phys_failed;
 }
-if (!(golan->mboxes.inbox = malloc_dma(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) {
+if (!(golan->mboxes.inbox = malloc_phys(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) {
 rc = -ENOMEM;
-goto malloc_dma_inbox_failed;
+goto malloc_phys_inbox_failed;
 }
-if (!(golan->mboxes.outbox = malloc_dma(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) {
+if (!(golan->mboxes.outbox = malloc_phys(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) {
 rc = -ENOMEM;
-goto malloc_dma_outbox_failed;
+goto malloc_phys_outbox_failed;
 }
 addr_l_sz = be32_to_cpu(readl(&golan->iseg->cmdq_addr_l_sz));

@@ -629,11 +629,11 @@ static inline int golan_cmd_init ( struct golan *golan )
 DBGC( golan , "%s Command interface was initialized\n", __FUNCTION__);
 return 0;

-malloc_dma_outbox_failed:
-free_dma(golan->mboxes.inbox, GOLAN_PAGE_SIZE);
-malloc_dma_inbox_failed:
-free_dma(golan->cmd.addr, GOLAN_PAGE_SIZE);
-malloc_dma_failed:
+malloc_phys_outbox_failed:
+free_phys(golan->mboxes.inbox, GOLAN_PAGE_SIZE);
+malloc_phys_inbox_failed:
+free_phys(golan->cmd.addr, GOLAN_PAGE_SIZE);
+malloc_phys_failed:
 DBGC (golan ,"%s Failed to initialize command interface (rc = 0x%x)\n",
 __FUNCTION__, rc);
 return rc;
@@ -743,7 +743,7 @@ static int golan_create_eq(struct golan *golan)

 eq->cons_index = 0;
 eq->size = GOLAN_NUM_EQES * sizeof(eq->eqes[0]);
-eq->eqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
+eq->eqes = malloc_phys ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
 if (!eq->eqes) {
 rc = -ENOMEM;
 goto err_create_eq_eqe_alloc;
@@ -781,7 +781,7 @@ static int golan_create_eq(struct golan *golan)
 return 0;

 err_create_eq_cmd:
-free_dma ( eq->eqes , GOLAN_PAGE_SIZE );
+free_phys ( eq->eqes , GOLAN_PAGE_SIZE );
 err_create_eq_eqe_alloc:
 DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
 return rc;
@@ -806,7 +806,7 @@ static void golan_destory_eq(struct golan *golan)
 rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
 GOLAN_PRINT_RC_AND_CMD_STATUS;

-free_dma ( golan->eq.eqes , GOLAN_PAGE_SIZE );
+free_phys ( golan->eq.eqes , GOLAN_PAGE_SIZE );
 golan->eq.eqn = 0;

 DBGC( golan, "%s Event queue (0x%x) was destroyed\n", __FUNCTION__, eqn);
@@ -962,14 +962,14 @@ static int golan_create_cq(struct ib_device *ibdev,
 goto err_create_cq;
 }
 golan_cq->size = sizeof(golan_cq->cqes[0]) * cq->num_cqes;
-golan_cq->doorbell_record = malloc_dma(GOLAN_CQ_DB_RECORD_SIZE,
+golan_cq->doorbell_record = malloc_phys(GOLAN_CQ_DB_RECORD_SIZE,
 GOLAN_CQ_DB_RECORD_SIZE);
 if (!golan_cq->doorbell_record) {
 rc = -ENOMEM;
 goto err_create_cq_db_alloc;
 }

-golan_cq->cqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
+golan_cq->cqes = malloc_phys ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
 if (!golan_cq->cqes) {
 rc = -ENOMEM;
 goto err_create_cq_cqe_alloc;
@@ -1008,9 +1008,9 @@ static int golan_create_cq(struct ib_device *ibdev,
 return 0;

 err_create_cq_cmd:
-free_dma( golan_cq->cqes , GOLAN_PAGE_SIZE );
+free_phys( golan_cq->cqes , GOLAN_PAGE_SIZE );
 err_create_cq_cqe_alloc:
-free_dma(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE);
+free_phys(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE);
 err_create_cq_db_alloc:
 free ( golan_cq );
 err_create_cq:
@@ -1045,8 +1045,8 @@ static void golan_destroy_cq(struct ib_device *ibdev,
 cq->cqn = 0;

 ib_cq_set_drvdata(cq, NULL);
-free_dma ( golan_cq->cqes , GOLAN_PAGE_SIZE );
-free_dma(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE);
+free_phys ( golan_cq->cqes , GOLAN_PAGE_SIZE );
+free_phys(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE);
 free(golan_cq);

 DBGC (golan, "%s CQ number 0x%x was destroyed\n", __FUNCTION__, cqn);
@@ -1138,7 +1138,7 @@ static int golan_create_qp_aux(struct ib_device *ibdev,
 golan_qp->size = golan_qp->sq.size + golan_qp->rq.size;

 /* allocate dma memory for WQEs (1 page is enough) - should change it */
-golan_qp->wqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
+golan_qp->wqes = malloc_phys ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
 if (!golan_qp->wqes) {
 rc = -ENOMEM;
 goto err_create_qp_wqe_alloc;
@@ -1160,7 +1160,7 @@ static int golan_create_qp_aux(struct ib_device *ibdev,
 data++;
 }

-golan_qp->doorbell_record = malloc_dma(sizeof(struct golan_qp_db),
+golan_qp->doorbell_record = malloc_phys(sizeof(struct golan_qp_db),
 sizeof(struct golan_qp_db));
 if (!golan_qp->doorbell_record) {
 rc = -ENOMEM;
@@ -1213,9 +1213,9 @@ static int golan_create_qp_aux(struct ib_device *ibdev,
 return 0;

 err_create_qp_cmd:
-free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
+free_phys(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
 err_create_qp_db_alloc:
-free_dma ( golan_qp->wqes, GOLAN_PAGE_SIZE );
+free_phys ( golan_qp->wqes, GOLAN_PAGE_SIZE );
 err_create_qp_wqe_alloc:
 err_create_qp_sq_size:
 err_create_qp_sq_wqe_size:
@@ -1422,8 +1422,8 @@ static void golan_destroy_qp(struct ib_device *ibdev,
 qp->qpn = 0;

 ib_qp_set_drvdata(qp, NULL);
-free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
-free_dma ( golan_qp->wqes, GOLAN_PAGE_SIZE );
+free_phys(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
+free_phys ( golan_qp->wqes, GOLAN_PAGE_SIZE );
 free(golan_qp);

 DBGC( golan ,"%s QP 0x%lx was destroyed\n", __FUNCTION__, qpn);

@@ -864,8 +864,8 @@ static int hermon_create_cq ( struct ib_device *ibdev,
 }

 /* Allocate doorbell */
-hermon_cq->doorbell = malloc_dma ( sizeof ( hermon_cq->doorbell[0] ),
-sizeof ( hermon_cq->doorbell[0] ) );
+hermon_cq->doorbell = malloc_phys ( sizeof ( hermon_cq->doorbell[0] ),
+sizeof ( hermon_cq->doorbell[0] ) );
 if ( ! hermon_cq->doorbell ) {
 rc = -ENOMEM;
 goto err_doorbell;
@@ -874,8 +874,8 @@ static int hermon_create_cq ( struct ib_device *ibdev,

 /* Allocate completion queue itself */
 hermon_cq->cqe_size = ( cq->num_cqes * sizeof ( hermon_cq->cqe[0] ) );
-hermon_cq->cqe = malloc_dma ( hermon_cq->cqe_size,
-sizeof ( hermon_cq->cqe[0] ) );
+hermon_cq->cqe = malloc_phys ( hermon_cq->cqe_size,
+sizeof ( hermon_cq->cqe[0] ) );
 if ( ! hermon_cq->cqe ) {
 rc = -ENOMEM;
 goto err_cqe;
@@ -925,9 +925,9 @@ static int hermon_create_cq ( struct ib_device *ibdev,
 err_sw2hw_cq:
 hermon_free_mtt ( hermon, &hermon_cq->mtt );
 err_alloc_mtt:
-free_dma ( hermon_cq->cqe, hermon_cq->cqe_size );
+free_phys ( hermon_cq->cqe, hermon_cq->cqe_size );
 err_cqe:
-free_dma ( hermon_cq->doorbell, sizeof ( hermon_cq->doorbell[0] ) );
+free_phys ( hermon_cq->doorbell, sizeof ( hermon_cq->doorbell[0] ) );
 err_doorbell:
 free ( hermon_cq );
 err_hermon_cq:
@@ -962,8 +962,8 @@ static void hermon_destroy_cq ( struct ib_device *ibdev,
 hermon_free_mtt ( hermon, &hermon_cq->mtt );

 /* Free memory */
-free_dma ( hermon_cq->cqe, hermon_cq->cqe_size );
-free_dma ( hermon_cq->doorbell, sizeof ( hermon_cq->doorbell[0] ) );
+free_phys ( hermon_cq->cqe, hermon_cq->cqe_size );
+free_phys ( hermon_cq->doorbell, sizeof ( hermon_cq->doorbell[0] ) );
 free ( hermon_cq );

 /* Mark queue number as free */
@@ -1128,8 +1128,8 @@ static int hermon_create_qp ( struct ib_device *ibdev,

 /* Allocate doorbells */
 hermon_qp->recv.doorbell =
-malloc_dma ( sizeof ( hermon_qp->recv.doorbell[0] ),
-sizeof ( hermon_qp->recv.doorbell[0] ) );
+malloc_phys ( sizeof ( hermon_qp->recv.doorbell[0] ),
+sizeof ( hermon_qp->recv.doorbell[0] ) );
 if ( ! hermon_qp->recv.doorbell ) {
 rc = -ENOMEM;
 goto err_recv_doorbell;
@@ -1157,8 +1157,8 @@ static int hermon_create_qp ( struct ib_device *ibdev,
 hermon_qp->wqe_size = ( hermon_qp->send.wqe_size +
 hermon_qp->recv.wqe_size +
 hermon_qp->recv.grh_size );
-hermon_qp->wqe = malloc_dma ( hermon_qp->wqe_size,
-sizeof ( hermon_qp->send.wqe[0] ) );
+hermon_qp->wqe = malloc_phys ( hermon_qp->wqe_size,
+sizeof ( hermon_qp->send.wqe[0] ) );
 if ( ! hermon_qp->wqe ) {
 rc = -ENOMEM;
 goto err_alloc_wqe;
@@ -1248,10 +1248,10 @@ static int hermon_create_qp ( struct ib_device *ibdev,
 err_rst2init_qp:
 hermon_free_mtt ( hermon, &hermon_qp->mtt );
 err_alloc_mtt:
-free_dma ( hermon_qp->wqe, hermon_qp->wqe_size );
+free_phys ( hermon_qp->wqe, hermon_qp->wqe_size );
 err_alloc_wqe:
-free_dma ( hermon_qp->recv.doorbell,
-sizeof ( hermon_qp->recv.doorbell[0] ) );
+free_phys ( hermon_qp->recv.doorbell,
+sizeof ( hermon_qp->recv.doorbell[0] ) );
 err_recv_doorbell:
 free ( hermon_qp );
 err_hermon_qp:
@@ -1363,9 +1363,9 @@ static void hermon_destroy_qp ( struct ib_device *ibdev,
 hermon_free_mtt ( hermon, &hermon_qp->mtt );

 /* Free memory */
-free_dma ( hermon_qp->wqe, hermon_qp->wqe_size );
-free_dma ( hermon_qp->recv.doorbell,
-sizeof ( hermon_qp->recv.doorbell[0] ) );
+free_phys ( hermon_qp->wqe, hermon_qp->wqe_size );
+free_phys ( hermon_qp->recv.doorbell,
+sizeof ( hermon_qp->recv.doorbell[0] ) );
 free ( hermon_qp );

 /* Mark queue number as free */
@@ -1887,8 +1887,8 @@ static int hermon_create_eq ( struct hermon *hermon ) {
 /* Allocate event queue itself */
 hermon_eq->eqe_size =
 ( HERMON_NUM_EQES * sizeof ( hermon_eq->eqe[0] ) );
-hermon_eq->eqe = malloc_dma ( hermon_eq->eqe_size,
-sizeof ( hermon_eq->eqe[0] ) );
+hermon_eq->eqe = malloc_phys ( hermon_eq->eqe_size,
+sizeof ( hermon_eq->eqe[0] ) );
 if ( ! hermon_eq->eqe ) {
 rc = -ENOMEM;
 goto err_eqe;
@@ -1946,7 +1946,7 @@ static int hermon_create_eq ( struct hermon *hermon ) {
 err_sw2hw_eq:
 hermon_free_mtt ( hermon, &hermon_eq->mtt );
 err_alloc_mtt:
-free_dma ( hermon_eq->eqe, hermon_eq->eqe_size );
+free_phys ( hermon_eq->eqe, hermon_eq->eqe_size );
 err_eqe:
 memset ( hermon_eq, 0, sizeof ( *hermon_eq ) );
 return rc;
@@ -1986,7 +1986,7 @@ static void hermon_destroy_eq ( struct hermon *hermon ) {
 hermon_free_mtt ( hermon, &hermon_eq->mtt );

 /* Free memory */
-free_dma ( hermon_eq->eqe, hermon_eq->eqe_size );
+free_phys ( hermon_eq->eqe, hermon_eq->eqe_size );
 memset ( hermon_eq, 0, sizeof ( *hermon_eq ) );
 }

@@ -3736,20 +3736,20 @@ static struct hermon * hermon_alloc ( void ) {
 goto err_hermon;

 /* Allocate space for mailboxes */
-hermon->mailbox_in = malloc_dma ( HERMON_MBOX_SIZE,
-HERMON_MBOX_ALIGN );
+hermon->mailbox_in = malloc_phys ( HERMON_MBOX_SIZE,
+HERMON_MBOX_ALIGN );
 if ( ! hermon->mailbox_in )
 goto err_mailbox_in;
-hermon->mailbox_out = malloc_dma ( HERMON_MBOX_SIZE,
-HERMON_MBOX_ALIGN );
+hermon->mailbox_out = malloc_phys ( HERMON_MBOX_SIZE,
+HERMON_MBOX_ALIGN );
 if ( ! hermon->mailbox_out )
 goto err_mailbox_out;

 return hermon;

-free_dma ( hermon->mailbox_out, HERMON_MBOX_SIZE );
+free_phys ( hermon->mailbox_out, HERMON_MBOX_SIZE );
 err_mailbox_out:
-free_dma ( hermon->mailbox_in, HERMON_MBOX_SIZE );
+free_phys ( hermon->mailbox_in, HERMON_MBOX_SIZE );
 err_mailbox_in:
 free ( hermon );
 err_hermon:
@@ -3765,8 +3765,8 @@ static void hermon_free ( struct hermon *hermon ) {

 ufree ( hermon->icm );
 ufree ( hermon->firmware_area );
-free_dma ( hermon->mailbox_out, HERMON_MBOX_SIZE );
-free_dma ( hermon->mailbox_in, HERMON_MBOX_SIZE );
+free_phys ( hermon->mailbox_out, HERMON_MBOX_SIZE );
+free_phys ( hermon->mailbox_in, HERMON_MBOX_SIZE );
 free ( hermon );
 }

@@ -531,8 +531,8 @@ static int linda_init_send ( struct linda *linda ) {
 linda->send_buf[i] = i;

 /* Allocate space for the SendBufAvail array */
-linda->sendbufavail = malloc_dma ( sizeof ( *linda->sendbufavail ),
-LINDA_SENDBUFAVAIL_ALIGN );
+linda->sendbufavail = malloc_phys ( sizeof ( *linda->sendbufavail ),
+LINDA_SENDBUFAVAIL_ALIGN );
 if ( ! linda->sendbufavail ) {
 rc = -ENOMEM;
 goto err_alloc_sendbufavail;
@@ -555,7 +555,7 @@ static int linda_init_send ( struct linda *linda ) {

 return 0;

-free_dma ( linda->sendbufavail, sizeof ( *linda->sendbufavail ) );
+free_phys ( linda->sendbufavail, sizeof ( *linda->sendbufavail ) );
 err_alloc_sendbufavail:
 return rc;
 }
@@ -576,7 +576,7 @@ static void linda_fini_send ( struct linda *linda ) {
 /* Ensure hardware has seen this disable */
 linda_readq ( linda, &sendctrl, QIB_7220_SendCtrl_offset );

-free_dma ( linda->sendbufavail, sizeof ( *linda->sendbufavail ) );
+free_phys ( linda->sendbufavail, sizeof ( *linda->sendbufavail ) );
 }

 /***************************************************************************
@@ -613,8 +613,8 @@ static int linda_create_recv_wq ( struct linda *linda,
 linda_wq->eager_cons = 0;

 /* Allocate receive header buffer */
-linda_wq->header = malloc_dma ( LINDA_RECV_HEADERS_SIZE,
-LINDA_RECV_HEADERS_ALIGN );
+linda_wq->header = malloc_phys ( LINDA_RECV_HEADERS_SIZE,
+LINDA_RECV_HEADERS_ALIGN );
 if ( ! linda_wq->header ) {
 rc = -ENOMEM;
 goto err_alloc_header;
@@ -650,7 +650,7 @@ static int linda_create_recv_wq ( struct linda *linda,
 virt_to_bus ( &linda_wq->header_prod ) );
 return 0;

-free_dma ( linda_wq->header, LINDA_RECV_HEADERS_SIZE );
+free_phys ( linda_wq->header, LINDA_RECV_HEADERS_SIZE );
 err_alloc_header:
 return rc;
 }
@@ -679,7 +679,7 @@ static void linda_destroy_recv_wq ( struct linda *linda,
 mb();

 /* Free headers ring */
-free_dma ( linda_wq->header, LINDA_RECV_HEADERS_SIZE );
+free_phys ( linda_wq->header, LINDA_RECV_HEADERS_SIZE );

 /* Free context */
 linda_free_ctx ( linda, ctx );

@@ -61,7 +61,7 @@ mlx_memory_alloc_dma_priv(
 )
 {
 mlx_status status = MLX_SUCCESS;
-*ptr = malloc_dma(size, align);
+*ptr = malloc_phys(size, align);
 if (*ptr == NULL) {
 status = MLX_OUT_OF_RESOURCES;
 } else {
@@ -78,7 +78,7 @@ mlx_memory_free_dma_priv(
 )
 {
 mlx_status status = MLX_SUCCESS;
-free_dma(ptr, size);
+free_phys(ptr, size);
 return status;
 }
 mlx_status

@@ -669,8 +669,8 @@ static int qib7322_init_send ( struct qib7322 *qib7322 ) {
 }

 /* Allocate space for the SendBufAvail array */
-qib7322->sendbufavail = malloc_dma ( sizeof ( *qib7322->sendbufavail ),
-QIB7322_SENDBUFAVAIL_ALIGN );
+qib7322->sendbufavail = malloc_phys ( sizeof ( *qib7322->sendbufavail ),
+QIB7322_SENDBUFAVAIL_ALIGN );
 if ( ! qib7322->sendbufavail ) {
 rc = -ENOMEM;
 goto err_alloc_sendbufavail;
@@ -697,7 +697,7 @@ static int qib7322_init_send ( struct qib7322 *qib7322 ) {

 return 0;

-free_dma ( qib7322->sendbufavail, sizeof ( *qib7322->sendbufavail ) );
+free_phys ( qib7322->sendbufavail, sizeof ( *qib7322->sendbufavail ) );
 err_alloc_sendbufavail:
 qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_vl15_port1 );
 err_create_send_bufs_vl15_port1:
@@ -724,7 +724,7 @@ static void qib7322_fini_send ( struct qib7322 *qib7322 ) {
 /* Ensure hardware has seen this disable */
 qib7322_readq ( qib7322, &sendctrl, QIB_7322_SendCtrl_offset );

-free_dma ( qib7322->sendbufavail, sizeof ( *qib7322->sendbufavail ) );
+free_phys ( qib7322->sendbufavail, sizeof ( *qib7322->sendbufavail ) );
 qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_vl15_port1 );
 qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_vl15_port0 );
 qib7322_destroy_send_bufs ( qib7322, qib7322->send_bufs_small );
@@ -767,8 +767,8 @@ static int qib7322_create_recv_wq ( struct ib_device *ibdev,
 qib7322_wq->eager_cons = 0;

 /* Allocate receive header buffer */
-qib7322_wq->header = malloc_dma ( QIB7322_RECV_HEADERS_SIZE,
-QIB7322_RECV_HEADERS_ALIGN );
+qib7322_wq->header = malloc_phys ( QIB7322_RECV_HEADERS_SIZE,
+QIB7322_RECV_HEADERS_ALIGN );
 if ( ! qib7322_wq->header ) {
 rc = -ENOMEM;
 goto err_alloc_header;
@@ -810,7 +810,7 @@ static int qib7322_create_recv_wq ( struct ib_device *ibdev,
 virt_to_bus ( &qib7322_wq->header_prod ) );
 return 0;

-free_dma ( qib7322_wq->header, QIB7322_RECV_HEADERS_SIZE );
+free_phys ( qib7322_wq->header, QIB7322_RECV_HEADERS_SIZE );
 err_alloc_header:
 return rc;
 }
@@ -846,7 +846,7 @@ static void qib7322_destroy_recv_wq ( struct ib_device *ibdev,
 mb();

 /* Free headers ring */
-free_dma ( qib7322_wq->header, QIB7322_RECV_HEADERS_SIZE );
+free_phys ( qib7322_wq->header, QIB7322_RECV_HEADERS_SIZE );
 }

 /**

@@ -249,7 +249,7 @@ static int a3c90x_setup_tx_ring(struct INF_3C90X *p)
 {
 DBGP("a3c90x_setup_tx_ring\n");
 p->tx_ring =
-malloc_dma(TX_RING_SIZE * sizeof(struct TXD), TX_RING_ALIGN);
+malloc_phys(TX_RING_SIZE * sizeof(struct TXD), TX_RING_ALIGN);

 if (!p->tx_ring) {
 DBG("Could not allocate TX-ring\n");
@@ -304,7 +304,7 @@ static void a3c90x_free_tx_ring(struct INF_3C90X *p)
 {
 DBGP("a3c90x_free_tx_ring\n");

-free_dma(p->tx_ring, TX_RING_SIZE * sizeof(struct TXD));
+free_phys(p->tx_ring, TX_RING_SIZE * sizeof(struct TXD));
 p->tx_ring = NULL;
 /* io_buffers are free()ed by netdev_tx_complete[,_err]() */
 }
@@ -461,7 +461,7 @@ static int a3c90x_setup_rx_ring(struct INF_3C90X *p)
 DBGP("a3c90x_setup_rx_ring\n");

 p->rx_ring =
-malloc_dma(RX_RING_SIZE * sizeof(struct RXD), RX_RING_ALIGN);
+malloc_phys(RX_RING_SIZE * sizeof(struct RXD), RX_RING_ALIGN);

 if (!p->rx_ring) {
 DBG("Could not allocate RX-ring\n");
@@ -491,7 +491,7 @@ static void a3c90x_free_rx_ring(struct INF_3C90X *p)
 {
 DBGP("a3c90x_free_rx_ring\n");

-free_dma(p->rx_ring, RX_RING_SIZE * sizeof(struct RXD));
+free_phys(p->rx_ring, RX_RING_SIZE * sizeof(struct RXD));
 p->rx_ring = NULL;
 }

@@ -877,7 +877,7 @@ ath5k_desc_alloc(struct ath5k_softc *sc)

 /* allocate descriptors */
 sc->desc_len = sizeof(struct ath5k_desc) * (ATH_TXBUF + ATH_RXBUF + 1);
-sc->desc = malloc_dma(sc->desc_len, ATH5K_DESC_ALIGN);
+sc->desc = malloc_phys(sc->desc_len, ATH5K_DESC_ALIGN);
 if (sc->desc == NULL) {
 DBG("ath5k: can't allocate descriptors\n");
 ret = -ENOMEM;
@@ -915,7 +915,7 @@ ath5k_desc_alloc(struct ath5k_softc *sc)
 return 0;

 err_free:
-free_dma(sc->desc, sc->desc_len);
+free_phys(sc->desc, sc->desc_len);
 err:
 sc->desc = NULL;
 return ret;
@@ -932,7 +932,7 @@ ath5k_desc_free(struct ath5k_softc *sc)
 ath5k_rxbuf_free(sc, bf);

 /* Free memory associated with all descriptors */
-free_dma(sc->desc, sc->desc_len);
+free_phys(sc->desc, sc->desc_len);

 free(sc->bufptr);
 sc->bufptr = NULL;

@@ -223,7 +223,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
 }

 /* allocate descriptors */
-dd->dd_desc = malloc_dma(dd->dd_desc_len, 16);
+dd->dd_desc = malloc_phys(dd->dd_desc_len, 16);
 if (dd->dd_desc == NULL) {
 error = -ENOMEM;
 goto fail;
@@ -264,7 +264,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
 }
 return 0;
 fail2:
-free_dma(dd->dd_desc, dd->dd_desc_len);
+free_phys(dd->dd_desc, dd->dd_desc_len);
 fail:
 memset(dd, 0, sizeof(*dd));
 return error;
@@ -588,7 +588,7 @@ void ath_descdma_cleanup(struct ath_softc *sc __unused,
 struct ath_descdma *dd,
 struct list_head *head)
 {
-free_dma(dd->dd_desc, dd->dd_desc_len);
+free_phys(dd->dd_desc, dd->dd_desc_len);

 INIT_LIST_HEAD(head);
 free(dd->dd_bufptr);

@@ -370,7 +370,7 @@ static void atl1e_free_ring_resources(struct atl1e_adapter *adapter)
 atl1e_clean_rx_ring(adapter);

 if (adapter->ring_vir_addr) {
-free_dma(adapter->ring_vir_addr, adapter->ring_size);
+free_phys(adapter->ring_vir_addr, adapter->ring_size);
 adapter->ring_vir_addr = NULL;
 adapter->ring_dma = 0;
 }
@@ -405,7 +405,7 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
 /* real ring DMA buffer */

 size = adapter->ring_size;
-adapter->ring_vir_addr = malloc_dma(adapter->ring_size, 32);
+adapter->ring_vir_addr = malloc_phys(adapter->ring_size, 32);

 if (adapter->ring_vir_addr == NULL) {
 DBG("atl1e: out of memory allocating %d bytes for %s ring\n",

@@ -436,7 +436,7 @@ static void b44_free_rx_ring(struct b44_private *bp)
 free_iob(bp->rx_iobuf[i]);
 bp->rx_iobuf[i] = NULL;
 }
-free_dma(bp->rx, B44_RX_RING_LEN_BYTES);
+free_phys(bp->rx, B44_RX_RING_LEN_BYTES);
 bp->rx = NULL;
 }
 }
@@ -446,11 +446,11 @@ static int b44_init_rx_ring(struct b44_private *bp)
 {
 b44_free_rx_ring(bp);

-bp->rx = malloc_dma(B44_RX_RING_LEN_BYTES, B44_DMA_ALIGNMENT);
+bp->rx = malloc_phys(B44_RX_RING_LEN_BYTES, B44_DMA_ALIGNMENT);
 if (!bp->rx)
 return -ENOMEM;
 if (!b44_address_ok(bp->rx)) {
-free_dma(bp->rx, B44_RX_RING_LEN_BYTES);
+free_phys(bp->rx, B44_RX_RING_LEN_BYTES);
 return -ENOTSUP;
 }

@@ -468,7 +468,7 @@ static int b44_init_rx_ring(struct b44_private *bp)
 static void b44_free_tx_ring(struct b44_private *bp)
 {
 if (bp->tx) {
-free_dma(bp->tx, B44_TX_RING_LEN_BYTES);
+free_phys(bp->tx, B44_TX_RING_LEN_BYTES);
 bp->tx = NULL;
 }
 }
@@ -478,11 +478,11 @@ static int b44_init_tx_ring(struct b44_private *bp)
 {
 b44_free_tx_ring(bp);

-bp->tx = malloc_dma(B44_TX_RING_LEN_BYTES, B44_DMA_ALIGNMENT);
+bp->tx = malloc_phys(B44_TX_RING_LEN_BYTES, B44_DMA_ALIGNMENT);
 if (!bp->tx)
 return -ENOMEM;
 if (!b44_address_ok(bp->tx)) {
-free_dma(bp->tx, B44_TX_RING_LEN_BYTES);
+free_phys(bp->tx, B44_TX_RING_LEN_BYTES);
 return -ENOTSUP;
 }

@@ -495,39 +495,39 @@ void bnxt_free_mem ( struct bnxt *bp )
 {
 DBGP ( "%s\n", __func__ );
 if ( bp->nq.bd_virt ) {
-free_dma ( bp->nq.bd_virt, NQ_RING_BUFFER_SIZE );
+free_phys ( bp->nq.bd_virt, NQ_RING_BUFFER_SIZE );
 bp->nq.bd_virt = NULL;
 }

 if ( bp->cq.bd_virt ) {
-free_dma ( bp->cq.bd_virt, CQ_RING_BUFFER_SIZE );
+free_phys ( bp->cq.bd_virt, CQ_RING_BUFFER_SIZE );
 bp->cq.bd_virt = NULL;
 }

 if ( bp->rx.bd_virt ) {
-free_dma ( bp->rx.bd_virt, RX_RING_BUFFER_SIZE );
+free_phys ( bp->rx.bd_virt, RX_RING_BUFFER_SIZE );
 bp->rx.bd_virt = NULL;
 }

 if ( bp->tx.bd_virt ) {
-free_dma ( bp->tx.bd_virt, TX_RING_BUFFER_SIZE );
+free_phys ( bp->tx.bd_virt, TX_RING_BUFFER_SIZE );
 bp->tx.bd_virt = NULL;
 }

 if ( bp->hwrm_addr_dma ) {
-free_dma ( bp->hwrm_addr_dma, DMA_BUFFER_SIZE );
+free_phys ( bp->hwrm_addr_dma, DMA_BUFFER_SIZE );
 bp->dma_addr_mapping = 0;
 bp->hwrm_addr_dma = NULL;
 }

 if ( bp->hwrm_addr_resp ) {
-free_dma ( bp->hwrm_addr_resp, RESP_BUFFER_SIZE );
+free_phys ( bp->hwrm_addr_resp, RESP_BUFFER_SIZE );
 bp->resp_addr_mapping = 0;
 bp->hwrm_addr_resp = NULL;
 }

 if ( bp->hwrm_addr_req ) {
-free_dma ( bp->hwrm_addr_req, REQ_BUFFER_SIZE );
+free_phys ( bp->hwrm_addr_req, REQ_BUFFER_SIZE );
 bp->req_addr_mapping = 0;
 bp->hwrm_addr_req = NULL;
 }
@@ -537,14 +537,14 @@ void bnxt_free_mem ( struct bnxt *bp )
 int bnxt_alloc_mem ( struct bnxt *bp )
 {
 DBGP ( "%s\n", __func__ );
-bp->hwrm_addr_req = malloc_dma ( REQ_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
-bp->hwrm_addr_resp = malloc_dma ( RESP_BUFFER_SIZE,
-BNXT_DMA_ALIGNMENT );
-bp->hwrm_addr_dma = malloc_dma ( DMA_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
-bp->tx.bd_virt = malloc_dma ( TX_RING_BUFFER_SIZE, DMA_ALIGN_4K );
-bp->rx.bd_virt = malloc_dma ( RX_RING_BUFFER_SIZE, DMA_ALIGN_4K );
-bp->cq.bd_virt = malloc_dma ( CQ_RING_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
-bp->nq.bd_virt = malloc_dma ( NQ_RING_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
+bp->hwrm_addr_req = malloc_phys ( REQ_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
+bp->hwrm_addr_resp = malloc_phys ( RESP_BUFFER_SIZE,
+BNXT_DMA_ALIGNMENT );
+bp->hwrm_addr_dma = malloc_phys ( DMA_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
+bp->tx.bd_virt = malloc_phys ( TX_RING_BUFFER_SIZE, DMA_ALIGN_4K );
+bp->rx.bd_virt = malloc_phys ( RX_RING_BUFFER_SIZE, DMA_ALIGN_4K );
+bp->cq.bd_virt = malloc_phys ( CQ_RING_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
+bp->nq.bd_virt = malloc_phys ( NQ_RING_BUFFER_SIZE, BNXT_DMA_ALIGNMENT );
 test_if ( bp->hwrm_addr_req &&
 bp->hwrm_addr_resp &&
 bp->hwrm_addr_dma &&

@@ -93,7 +93,7 @@ FILE_LICENCE ( GPL2_OR_LATER );

 /*
 * Debugging levels:
-* - DBG() is for any errors, i.e. failed alloc_iob(), malloc_dma(),
+* - DBG() is for any errors, i.e. failed alloc_iob(), malloc_phys(),
 *   TX overflow, corrupted packets, ...
 * - DBG2() is for successful events, like packet received,
 * packet transmitted, and other general notifications.
@@ -335,7 +335,7 @@ static int ifec_net_open ( struct net_device *netdev )
 ifec_mdio_setup ( netdev, options );

 /* Prepare MAC address w/ Individual Address Setup (ias) command.*/
-ias = malloc_dma ( sizeof ( *ias ), CB_ALIGN );
+ias = malloc_phys ( sizeof ( *ias ), CB_ALIGN );
 if ( !ias ) {
 rc = -ENOMEM;
 goto error;
@@ -345,7 +345,7 @@ static int ifec_net_open ( struct net_device *netdev )
 memcpy ( ias->ia, netdev->ll_addr, ETH_ALEN );

 /* Prepare operating parameters w/ a configure command. */
-cfg = malloc_dma ( sizeof ( *cfg ), CB_ALIGN );
+cfg = malloc_phys ( sizeof ( *cfg ), CB_ALIGN );
 if ( !cfg ) {
 rc = -ENOMEM;
 goto error;
@@ -367,8 +367,8 @@ static int ifec_net_open ( struct net_device *netdev )
 DBG ( "Failed to initiate!\n" );
 goto error;
 }
-free_dma ( ias, sizeof ( *ias ) );
-free_dma ( cfg, sizeof ( *cfg ) );
+free_phys ( ias, sizeof ( *ias ) );
+free_phys ( cfg, sizeof ( *cfg ) );
 DBG2 ( "cfg " );

 /* Enable rx by sending ring address to card */
@@ -381,8 +381,8 @@ static int ifec_net_open ( struct net_device *netdev )
 return 0;

 error:
-free_dma ( cfg, sizeof ( *cfg ) );
-free_dma ( ias, sizeof ( *ias ) );
+free_phys ( cfg, sizeof ( *cfg ) );
+free_phys ( ias, sizeof ( *ias ) );
 ifec_free ( netdev );
 ifec_reset ( netdev );
 return rc;
@@ -703,7 +703,7 @@ static void ifec_free ( struct net_device *netdev )
 }

 /* free TX ring buffer */
-free_dma ( priv->tcbs, TX_RING_BYTES );
+free_phys ( priv->tcbs, TX_RING_BYTES );

 priv->tcbs = NULL;
 }
@@ -1025,7 +1025,7 @@ static int ifec_tx_setup ( struct net_device *netdev )
 DBGP ( "ifec_tx_setup\n" );

 /* allocate tx ring */
-priv->tcbs = malloc_dma ( TX_RING_BYTES, CB_ALIGN );
+priv->tcbs = malloc_phys ( TX_RING_BYTES, CB_ALIGN );
 if ( !priv->tcbs ) {
 DBG ( "TX-ring allocation failed\n" );
 return -ENOMEM;

@@ -164,7 +164,7 @@ static int ena_create_admin ( struct ena_nic *ena ) {
 int rc;

 /* Allocate admin completion queue */
-ena->acq.rsp = malloc_dma ( acq_len, acq_len );
+ena->acq.rsp = malloc_phys ( acq_len, acq_len );
 if ( ! ena->acq.rsp ) {
 rc = -ENOMEM;
 goto err_alloc_acq;
@@ -172,7 +172,7 @@ static int ena_create_admin ( struct ena_nic *ena ) {
 memset ( ena->acq.rsp, 0, acq_len );

 /* Allocate admin queue */
-ena->aq.req = malloc_dma ( aq_len, aq_len );
+ena->aq.req = malloc_phys ( aq_len, aq_len );
 if ( ! ena->aq.req ) {
 rc = -ENOMEM;
 goto err_alloc_aq;
@@ -196,9 +196,9 @@ static int ena_create_admin ( struct ena_nic *ena ) {

 ena_clear_caps ( ena, ENA_AQ_CAPS );
 ena_clear_caps ( ena, ENA_ACQ_CAPS );
-free_dma ( ena->aq.req, aq_len );
+free_phys ( ena->aq.req, aq_len );
 err_alloc_aq:
-free_dma ( ena->acq.rsp, acq_len );
+free_phys ( ena->acq.rsp, acq_len );
 err_alloc_acq:
 return rc;
 }
@@ -218,8 +218,8 @@ static void ena_destroy_admin ( struct ena_nic *ena ) {
 wmb();

 /* Free queues */
-free_dma ( ena->aq.req, aq_len );
-free_dma ( ena->acq.rsp, acq_len );
+free_phys ( ena->aq.req, aq_len );
+free_phys ( ena->acq.rsp, acq_len );
 DBGC ( ena, "ENA %p AQ and ACQ destroyed\n", ena );
 }

@@ -338,7 +338,7 @@ static int ena_create_sq ( struct ena_nic *ena, struct ena_sq *sq,
 int rc;

 /* Allocate submission queue entries */
-sq->sqe.raw = malloc_dma ( sq->len, ENA_ALIGN );
+sq->sqe.raw = malloc_phys ( sq->len, ENA_ALIGN );
 if ( ! sq->sqe.raw ) {
 rc = -ENOMEM;
 goto err_alloc;
@@ -375,7 +375,7 @@ static int ena_create_sq ( struct ena_nic *ena, struct ena_sq *sq,
 return 0;

 err_admin:
-free_dma ( sq->sqe.raw, sq->len );
+free_phys ( sq->sqe.raw, sq->len );
 err_alloc:
 return rc;
 }
@@ -403,7 +403,7 @@ static int ena_destroy_sq ( struct ena_nic *ena, struct ena_sq *sq ) {
 return rc;

 /* Free submission queue entries */
-free_dma ( sq->sqe.raw, sq->len );
+free_phys ( sq->sqe.raw, sq->len );

 DBGC ( ena, "ENA %p %s SQ%d destroyed\n",
 ena, ena_direction ( sq->direction ), sq->id );
@@ -423,7 +423,7 @@ static int ena_create_cq ( struct ena_nic *ena, struct ena_cq *cq ) {
 int rc;

 /* Allocate completion queue entries */
-cq->cqe.raw = malloc_dma ( cq->len, ENA_ALIGN );
+cq->cqe.raw = malloc_phys ( cq->len, ENA_ALIGN );
 if ( ! cq->cqe.raw ) {
 rc = -ENOMEM;
 goto err_alloc;
@@ -461,7 +461,7 @@ static int ena_create_cq ( struct ena_nic *ena, struct ena_cq *cq ) {
 return 0;

 err_admin:
-free_dma ( cq->cqe.raw, cq->len );
+free_phys ( cq->cqe.raw, cq->len );
 err_alloc:
 return rc;
 }
@@ -488,7 +488,7 @@ static int ena_destroy_cq ( struct ena_nic *ena, struct ena_cq *cq ) {
 return rc;

 /* Free completion queue entries */
-free_dma ( cq->cqe.raw, cq->len );
+free_phys ( cq->cqe.raw, cq->len );

 DBGC ( ena, "ENA %p CQ%d destroyed\n", ena, cq->id );
 return 0;

@@ -3025,7 +3025,7 @@ falcon_free_special_buffer ( void *p )
 {
 /* We don't bother cleaning up the buffer table entries -
 * we're hardly limited */
-free_dma ( p, EFAB_BUF_ALIGN );
+free_phys ( p, EFAB_BUF_ALIGN );
 }

 static void*
@@ -3038,7 +3038,7 @@ falcon_alloc_special_buffer ( struct efab_nic *efab, int bytes,
 unsigned long dma_addr;

 /* Allocate the buffer, aligned on a buffer address boundary */
-buffer = malloc_dma ( bytes, EFAB_BUF_ALIGN );
+buffer = malloc_phys ( bytes, EFAB_BUF_ALIGN );
 if ( ! buffer )
 return NULL;

@@ -831,7 +831,7 @@ static int exanic_probe ( struct pci_device *pci ) {
 }

 /* Allocate transmit feedback region (shared between all ports) */
-exanic->txf = malloc_dma ( EXANIC_TXF_LEN, EXANIC_ALIGN );
+exanic->txf = malloc_phys ( EXANIC_TXF_LEN, EXANIC_ALIGN );
 if ( ! exanic->txf ) {
 rc = -ENOMEM;
 goto err_alloc_txf;
@@ -853,7 +853,7 @@ static int exanic_probe ( struct pci_device *pci ) {
 for ( i-- ; i >= 0 ; i-- )
 exanic_remove_port ( exanic, i );
 exanic_reset ( exanic );
-free_dma ( exanic->txf, EXANIC_TXF_LEN );
+free_phys ( exanic->txf, EXANIC_TXF_LEN );
 err_alloc_txf:
 iounmap ( exanic->tx );
 err_ioremap_tx:
@@ -882,7 +882,7 @@ static void exanic_remove ( struct pci_device *pci ) {
 exanic_reset ( exanic );

 /* Free transmit feedback region */
-free_dma ( exanic->txf, EXANIC_TXF_LEN );
+free_phys ( exanic->txf, EXANIC_TXF_LEN );

 /* Unmap transmit region */
 iounmap ( exanic->tx );

@@ -267,7 +267,7 @@ nv_init_rings ( struct forcedeth_private *priv )

 /* Allocate ring for both TX and RX */
 priv->rx_ring =
-malloc_dma ( sizeof(struct ring_desc) * RXTX_RING_SIZE, 32 );
+malloc_phys ( sizeof(struct ring_desc) * RXTX_RING_SIZE, 32 );
 if ( ! priv->rx_ring )
 goto err_malloc;
 priv->tx_ring = &priv->rx_ring[RX_RING_SIZE];
@@ -308,7 +308,7 @@ nv_free_rxtx_resources ( struct forcedeth_private *priv )

 DBGP ( "nv_free_rxtx_resources\n" );

-free_dma ( priv->rx_ring, sizeof(struct ring_desc) * RXTX_RING_SIZE );
+free_phys ( priv->rx_ring, sizeof(struct ring_desc) * RXTX_RING_SIZE );

 for ( i = 0; i < RX_RING_SIZE; i++ ) {
 free_iob ( priv->rx_iobuf[i] );

@@ -343,7 +343,7 @@ static int icplus_create_ring ( struct icplus_nic *icp, struct icplus_ring *ring
 struct icplus_descriptor *next;

 /* Allocate descriptor ring */
-ring->entry = malloc_dma ( len, ICP_ALIGN );
+ring->entry = malloc_phys ( len, ICP_ALIGN );
 if ( ! ring->entry ) {
 rc = -ENOMEM;
 goto err_alloc;
@@ -369,7 +369,7 @@ static int icplus_create_ring ( struct icplus_nic *icp, struct icplus_ring *ring
 ( virt_to_bus ( ring->entry ) + len ) );
 return 0;

-free_dma ( ring->entry, len );
+free_phys ( ring->entry, len );
 ring->entry = NULL;
 err_alloc:
 return rc;
@@ -386,7 +386,7 @@ static void icplus_destroy_ring ( struct icplus_nic *icp __unused,
 size_t len = ( sizeof ( ring->entry[0] ) * ICP_NUM_DESC );

 /* Free descriptor ring */
-free_dma ( ring->entry, len );
+free_phys ( ring->entry, len );
 ring->entry = NULL;
 }

@@ -46,7 +46,7 @@ int igbvf_setup_tx_resources ( struct igbvf_adapter *adapter )

 /* Allocate transmit descriptor ring memory.
 It must not cross a 64K boundary because of hardware errata #23
-so we use malloc_dma() requesting a 128 byte block that is
+so we use malloc_phys() requesting a 128 byte block that is
 128 byte aligned. This should guarantee that the memory
 allocated will not cross a 64K boundary, because 128 is an
 even multiple of 65536 ( 65536 / 128 == 512 ), so all possible
@@ -55,7 +55,7 @@ int igbvf_setup_tx_resources ( struct igbvf_adapter *adapter )
 */

 adapter->tx_base =
-malloc_dma ( adapter->tx_ring_size, adapter->tx_ring_size );
+malloc_phys ( adapter->tx_ring_size, adapter->tx_ring_size );

 if ( ! adapter->tx_base ) {
 return -ENOMEM;
@@ -78,7 +78,7 @@ void igbvf_free_tx_resources ( struct igbvf_adapter *adapter )
 {
 DBG ( "igbvf_free_tx_resources\n" );

-free_dma ( adapter->tx_base, adapter->tx_ring_size );
+free_phys ( adapter->tx_base, adapter->tx_ring_size );
 }

 /**
@@ -93,7 +93,7 @@ void igbvf_free_rx_resources ( struct igbvf_adapter *adapter )

 DBG ( "igbvf_free_rx_resources\n" );

-free_dma ( adapter->rx_base, adapter->rx_ring_size );
+free_phys ( adapter->rx_base, adapter->rx_ring_size );

 for ( i = 0; i < NUM_RX_DESC; i++ ) {
 free_iob ( adapter->rx_iobuf[i] );
@@ -574,7 +574,7 @@ int igbvf_setup_rx_resources ( struct igbvf_adapter *adapter )
 */

 adapter->rx_base =
-malloc_dma ( adapter->rx_ring_size, adapter->rx_ring_size );
+malloc_phys ( adapter->rx_ring_size, adapter->rx_ring_size );

 if ( ! adapter->rx_base ) {
 return -ENOMEM;

@@ -504,7 +504,7 @@ int intel_create_ring ( struct intel_nic *intel, struct intel_ring *ring ) {
 * prevent any possible page-crossing errors due to hardware
 * errata.
 */
-ring->desc = malloc_dma ( ring->len, ring->len );
+ring->desc = malloc_phys ( ring->len, ring->len );
 if ( ! ring->desc )
 return -ENOMEM;

@@ -553,7 +553,7 @@ void intel_destroy_ring ( struct intel_nic *intel, struct intel_ring *ring ) {
 intel_reset_ring ( intel, ring->reg );

 /* Free descriptor ring */
-free_dma ( ring->desc, ring->len );
+free_phys ( ring->desc, ring->len );
 ring->desc = NULL;
 ring->prod = 0;
 ring->cons = 0;

@@ -195,7 +195,7 @@ static int intelxl_alloc_admin ( struct intelxl_nic *intelxl,
 size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC );

 /* Allocate admin queue */
-admin->buf = malloc_dma ( ( buf_len + len ), INTELXL_ALIGN );
+admin->buf = malloc_phys ( ( buf_len + len ), INTELXL_ALIGN );
 if ( ! admin->buf )
 return -ENOMEM;
 admin->desc = ( ( ( void * ) admin->buf ) + buf_len );
@@ -277,7 +277,7 @@ static void intelxl_free_admin ( struct intelxl_nic *intelxl __unused,
 size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC );

 /* Free queue */
-free_dma ( admin->buf, ( buf_len + len ) );
+free_phys ( admin->buf, ( buf_len + len ) );
 }

 /**
@@ -926,7 +926,7 @@ int intelxl_alloc_ring ( struct intelxl_nic *intelxl,
 int rc;

 /* Allocate descriptor ring */
-ring->desc.raw = malloc_dma ( ring->len, INTELXL_ALIGN );
+ring->desc.raw = malloc_phys ( ring->len, INTELXL_ALIGN );
 if ( ! ring->desc.raw ) {
 rc = -ENOMEM;
 goto err_alloc;
@@ -950,7 +950,7 @@ int intelxl_alloc_ring ( struct intelxl_nic *intelxl,

 return 0;

-free_dma ( ring->desc.raw, ring->len );
+free_phys ( ring->desc.raw, ring->len );
 err_alloc:
 return rc;
 }
@@ -965,7 +965,7 @@ void intelxl_free_ring ( struct intelxl_nic *intelxl __unused,
 struct intelxl_ring *ring ) {

 /* Free descriptor ring */
-free_dma ( ring->desc.raw, ring->len );
+free_phys ( ring->desc.raw, ring->len );
 ring->desc.raw = NULL;
 }

@@ -262,7 +262,7 @@ jme_free_tx_resources(struct jme_adapter *jme)
 sizeof(struct io_buffer *) * jme->tx_ring_size);
 free(txring->bufinf);
 }
-free_dma(txring->desc, jme->tx_ring_size * TX_DESC_SIZE);
+free_phys(txring->desc, jme->tx_ring_size * TX_DESC_SIZE);
 txring->desc = NULL;
 txring->dma = 0;
 txring->bufinf = NULL;
@@ -277,7 +277,7 @@ jme_alloc_tx_resources(struct jme_adapter *jme)
 {
 struct jme_ring *txring = &jme->txring;

-txring->desc = malloc_dma(jme->tx_ring_size * TX_DESC_SIZE,
+txring->desc = malloc_phys(jme->tx_ring_size * TX_DESC_SIZE,
 RING_DESC_ALIGN);
 if (!txring->desc) {
 DBG("Can not allocate transmit ring descriptors.\n");
@@ -442,7 +442,7 @@ jme_free_rx_resources(struct jme_adapter *jme)
 free(rxring->bufinf);
 }

-free_dma(rxring->desc, jme->rx_ring_size * RX_DESC_SIZE);
+free_phys(rxring->desc, jme->rx_ring_size * RX_DESC_SIZE);
 rxring->desc = NULL;
 rxring->dma = 0;
 rxring->bufinf = NULL;
@@ -458,7 +458,7 @@ jme_alloc_rx_resources(struct jme_adapter *jme)
 struct jme_ring *rxring = &jme->rxring;
 struct io_buffer **bufinf;

-rxring->desc = malloc_dma(jme->rx_ring_size * RX_DESC_SIZE,
+rxring->desc = malloc_phys(jme->rx_ring_size * RX_DESC_SIZE,
 RING_DESC_ALIGN);
 if (!rxring->desc) {
 DBG("Can not allocate receive ring descriptors.\n");

@@ -66,7 +66,7 @@ FILE_LICENCE ( GPL2_ONLY );
 
 /*
  * Debugging levels:
- *   - DBG() is for any errors, i.e. failed alloc_iob(), malloc_dma(),
+ *   - DBG() is for any errors, i.e. failed alloc_iob(), malloc_phys(),
  *     TX overflow, corrupted packets, ...
  *   - DBG2() is for successful events, like packet received,
  *     packet transmitted, and other general notifications.
@@ -918,7 +918,7 @@ static void myri10ge_net_close ( struct net_device *netdev )
 
 	/* Release DMAable memory. */
 
-	free_dma ( priv->dma, sizeof ( *priv->dma ) );
+	free_phys ( priv->dma, sizeof ( *priv->dma ) );
 
 	/* Erase all state from the open. */
 
@@ -988,7 +988,7 @@ static int myri10ge_net_open ( struct net_device *netdev )
 
 	/* Allocate cleared DMAable buffers. */
 
-	priv->dma = malloc_dma ( sizeof ( *priv->dma ) , 128 );
+	priv->dma = malloc_phys ( sizeof ( *priv->dma ) , 128 );
 	if ( !priv->dma ) {
 		rc = -ENOMEM;
 		dbg = "DMA";
@@ -1152,7 +1152,7 @@ abort_with_receives_posted:
 		free_iob ( priv->receive_iob[priv->receives_posted] );
abort_with_dma:
 	/* Because the link is not up, we don't have to reset the NIC here. */
-	free_dma ( priv->dma, sizeof ( *priv->dma ) );
+	free_phys ( priv->dma, sizeof ( *priv->dma ) );
abort_with_nothing:
 	/* Erase all signs of the failed open. */
 	memset ( priv, 0, sizeof ( *priv ) );
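The debugging-levels comment in the hunk above describes the convention used throughout this driver: DBG() for errors such as a failed alloc_iob() or malloc_phys(), DBG2() for routine successes. A minimal sketch of that convention with the renamed allocator follows; the function name, buffer length and alignment are illustrative assumptions, not part of this patch.

/* Illustrative only: report an allocation failure with DBG() and a
 * routine success with DBG2(), as described in the comment above.
 */
static int example_alloc_report ( size_t len ) {
	void *buf;

	buf = malloc_phys ( len, 128 );	/* placeholder alignment */
	if ( ! buf ) {
		DBG ( "could not allocate %zd DMA bytes\n", len );
		return -ENOMEM;
	}
	DBG2 ( "allocated %zd DMA bytes at %p\n", len, buf );
	free_phys ( buf, len );
	return 0;
}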
@@ -165,7 +165,7 @@ static int myson_create_ring ( struct myson_nic *myson,
 	int rc;
 
 	/* Allocate descriptor ring */
-	ring->desc = malloc_dma ( len, MYSON_RING_ALIGN );
+	ring->desc = malloc_phys ( len, MYSON_RING_ALIGN );
 	if ( ! ring->desc ) {
 		rc = -ENOMEM;
 		goto err_alloc;
@@ -197,7 +197,7 @@ static int myson_create_ring ( struct myson_nic *myson,
 	return 0;
 
 err_64bit:
-	free_dma ( ring->desc, len );
+	free_phys ( ring->desc, len );
 	ring->desc = NULL;
 err_alloc:
 	return rc;
@@ -217,7 +217,7 @@ static void myson_destroy_ring ( struct myson_nic *myson,
 	writel ( 0, myson->regs + ring->reg );
 
 	/* Free descriptor ring */
-	free_dma ( ring->desc, len );
+	free_phys ( ring->desc, len );
 	ring->desc = NULL;
 	ring->prod = 0;
 	ring->cons = 0;
@@ -408,7 +408,7 @@ static int natsemi_create_ring ( struct natsemi_nic *natsemi,
 	 * ensure that it can't possibly cross the boundary of 32-bit
 	 * address space.
 	 */
-	ring->desc = malloc_dma ( len, len );
+	ring->desc = malloc_phys ( len, len );
 	if ( ! ring->desc ) {
 		rc = -ENOMEM;
 		goto err_alloc;
@@ -454,7 +454,7 @@ static int natsemi_create_ring ( struct natsemi_nic *natsemi,
 	return 0;
 
 err_64bit:
-	free_dma ( ring->desc, len );
+	free_phys ( ring->desc, len );
 	ring->desc = NULL;
 err_alloc:
 	return rc;
@@ -476,7 +476,7 @@ static void natsemi_destroy_ring ( struct natsemi_nic *natsemi,
 	writel ( 0, natsemi->regs + ring->reg + 4 );
 
 	/* Free descriptor ring */
-	free_dma ( ring->desc, len );
+	free_phys ( ring->desc, len );
 	ring->desc = NULL;
 	ring->prod = 0;
 	ring->cons = 0;
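The comment in the first natsemi hunk above relies on a property of allocating a ring with alignment equal to its own length: a block of size len that starts on a multiple of len lies entirely inside one naturally aligned len-sized window, so it can never straddle a larger power-of-two boundary such as the 4 GiB limit of 32-bit addressing. A small self-contained check of that property (illustrative only, not part of the patch):

#include <assert.h>
#include <stdint.h>

/* Illustrative check: if len is a power of two and addr is a multiple
 * of len, the first and last byte of the block fall in the same
 * len-aligned window, so no boundary that is a multiple of len (or of
 * any larger power of two) can be crossed.
 */
static void check_no_boundary_cross ( uintptr_t addr, uintptr_t len ) {
	assert ( len && ( ( len & ( len - 1 ) ) == 0 ) );
	assert ( ( addr & ( len - 1 ) ) == 0 );
	assert ( ( addr / len ) == ( ( addr + len - 1 ) / len ) );
}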
@@ -338,7 +338,7 @@ static int netfront_create_ring ( struct netfront_nic *netfront,
 	ring->id_cons = 0;
 
 	/* Allocate and initialise shared ring */
-	ring->sring.raw = malloc_dma ( PAGE_SIZE, PAGE_SIZE );
+	ring->sring.raw = malloc_phys ( PAGE_SIZE, PAGE_SIZE );
 	if ( ! ring->sring.raw ) {
 		rc = -ENOMEM;
 		goto err_alloc;
@@ -368,7 +368,7 @@ static int netfront_create_ring ( struct netfront_nic *netfront,
 err_write_num:
 	xengrant_invalidate ( xen, ring->ref );
 err_permit_access:
-	free_dma ( ring->sring.raw, PAGE_SIZE );
+	free_phys ( ring->sring.raw, PAGE_SIZE );
 err_alloc:
 	return rc;
 }
@@ -490,7 +490,7 @@ static void netfront_destroy_ring ( struct netfront_nic *netfront,
 	xengrant_invalidate ( xen, ring->ref );
 
 	/* Free page */
-	free_dma ( ring->sring.raw, PAGE_SIZE );
+	free_phys ( ring->sring.raw, PAGE_SIZE );
 	ring->sring.raw = NULL;
 }
 
@@ -246,7 +246,7 @@ pcnet32_setup_rx_resources ( struct pcnet32_private *priv )
 {
 	DBGP ( "pcnet32_setup_rx_resources\n" );
 
-	priv->rx_base = malloc_dma ( RX_RING_BYTES, RX_RING_ALIGN );
+	priv->rx_base = malloc_phys ( RX_RING_BYTES, RX_RING_ALIGN );
 
 	DBG ( "priv->rx_base = %#08lx\n", virt_to_bus ( priv->rx_base ) );
 
@@ -270,7 +270,7 @@ pcnet32_free_rx_resources ( struct pcnet32_private *priv )
 
 	DBGP ( "pcnet32_free_rx_resources\n" );
 
-	free_dma ( priv->rx_base, RX_RING_BYTES );
+	free_phys ( priv->rx_base, RX_RING_BYTES );
 
 	for ( i = 0; i < RX_RING_SIZE; i++ ) {
 		free_iob ( priv->rx_iobuf[i] );
@@ -290,7 +290,7 @@ pcnet32_setup_tx_resources ( struct pcnet32_private *priv )
 {
 	DBGP ( "pcnet32_setup_tx_resources\n" );
 
-	priv->tx_base = malloc_dma ( TX_RING_BYTES, TX_RING_ALIGN );
+	priv->tx_base = malloc_phys ( TX_RING_BYTES, TX_RING_ALIGN );
 
 	if ( ! priv->tx_base ) {
 		return -ENOMEM;
@@ -312,7 +312,7 @@ pcnet32_free_tx_resources ( struct pcnet32_private *priv )
 {
 	DBGP ( "pcnet32_free_tx_resources\n" );
 
-	free_dma ( priv->tx_base, TX_RING_BYTES );
+	free_phys ( priv->tx_base, TX_RING_BYTES );
 }
 
 static int
@@ -640,7 +640,7 @@ static int phantom_create_rx_ctx ( struct phantom_nic *phantom ) {
 	int rc;
 
 	/* Allocate context creation buffer */
-	buf = malloc_dma ( sizeof ( *buf ), UNM_DMA_BUFFER_ALIGN );
+	buf = malloc_phys ( sizeof ( *buf ), UNM_DMA_BUFFER_ALIGN );
 	if ( ! buf ) {
 		rc = -ENOMEM;
 		goto out;
@@ -716,7 +716,7 @@ static int phantom_create_rx_ctx ( struct phantom_nic *phantom ) {
 	       phantom, phantom->sds_irq_mask_crb );
 
 out:
-	free_dma ( buf, sizeof ( *buf ) );
+	free_phys ( buf, sizeof ( *buf ) );
 	return rc;
 }
 
@@ -765,7 +765,7 @@ static int phantom_create_tx_ctx ( struct phantom_nic *phantom ) {
 	int rc;
 
 	/* Allocate context creation buffer */
-	buf = malloc_dma ( sizeof ( *buf ), UNM_DMA_BUFFER_ALIGN );
+	buf = malloc_phys ( sizeof ( *buf ), UNM_DMA_BUFFER_ALIGN );
 	if ( ! buf ) {
 		rc = -ENOMEM;
 		goto out;
@@ -821,7 +821,7 @@ static int phantom_create_tx_ctx ( struct phantom_nic *phantom ) {
 	       phantom, phantom->cds_producer_crb );
 
 out:
-	free_dma ( buf, sizeof ( *buf ) );
+	free_phys ( buf, sizeof ( *buf ) );
 	return rc;
 }
 
@@ -1164,8 +1164,8 @@ static int phantom_open ( struct net_device *netdev ) {
 	int rc;
 
 	/* Allocate and zero descriptor rings */
-	phantom->desc = malloc_dma ( sizeof ( *(phantom->desc) ),
-				     UNM_DMA_BUFFER_ALIGN );
+	phantom->desc = malloc_phys ( sizeof ( *(phantom->desc) ),
+				      UNM_DMA_BUFFER_ALIGN );
 	if ( ! phantom->desc ) {
 		rc = -ENOMEM;
 		goto err_alloc_desc;
@@ -1208,7 +1208,7 @@ static int phantom_open ( struct net_device *netdev ) {
 err_create_tx_ctx:
 	phantom_destroy_rx_ctx ( phantom );
 err_create_rx_ctx:
-	free_dma ( phantom->desc, sizeof ( *(phantom->desc) ) );
+	free_phys ( phantom->desc, sizeof ( *(phantom->desc) ) );
 	phantom->desc = NULL;
 err_alloc_desc:
 	return rc;
@@ -1229,7 +1229,7 @@ static void phantom_close ( struct net_device *netdev ) {
 	phantom_del_macaddr ( phantom, netdev->ll_broadcast );
 	phantom_destroy_tx_ctx ( phantom );
 	phantom_destroy_rx_ctx ( phantom );
-	free_dma ( phantom->desc, sizeof ( *(phantom->desc) ) );
+	free_phys ( phantom->desc, sizeof ( *(phantom->desc) ) );
 	phantom->desc = NULL;
 
 	/* Flush any uncompleted descriptors */
@@ -514,7 +514,7 @@ static int realtek_create_buffer ( struct realtek_nic *rtl ) {
 		return 0;
 
 	/* Allocate buffer */
-	rtl->rx_buffer = malloc_dma ( len, RTL_RXBUF_ALIGN );
+	rtl->rx_buffer = malloc_phys ( len, RTL_RXBUF_ALIGN );
 	if ( ! rtl->rx_buffer ) {
 		rc = -ENOMEM;
 		goto err_alloc;
@@ -539,7 +539,7 @@ static int realtek_create_buffer ( struct realtek_nic *rtl ) {
 	return 0;
 
 err_64bit:
-	free_dma ( rtl->rx_buffer, len );
+	free_phys ( rtl->rx_buffer, len );
 	rtl->rx_buffer = NULL;
 err_alloc:
 	return rc;
@@ -561,7 +561,7 @@ static void realtek_destroy_buffer ( struct realtek_nic *rtl ) {
 	writel ( 0, rtl->regs + RTL_RBSTART );
 
 	/* Free buffer */
-	free_dma ( rtl->rx_buffer, len );
+	free_phys ( rtl->rx_buffer, len );
 	rtl->rx_buffer = NULL;
 	rtl->rx_offset = 0;
 }
@@ -582,7 +582,7 @@ static int realtek_create_ring ( struct realtek_nic *rtl,
 		return 0;
 
 	/* Allocate descriptor ring */
-	ring->desc = malloc_dma ( ring->len, RTL_RING_ALIGN );
+	ring->desc = malloc_phys ( ring->len, RTL_RING_ALIGN );
 	if ( ! ring->desc )
 		return -ENOMEM;
 
@@ -623,7 +623,7 @@ static void realtek_destroy_ring ( struct realtek_nic *rtl,
 	writel ( 0, rtl->regs + ring->reg + 4 );
 
 	/* Free descriptor ring */
-	free_dma ( ring->desc, ring->len );
+	free_phys ( ring->desc, ring->len );
 	ring->desc = NULL;
 }
 
@@ -292,7 +292,7 @@ static int rhine_create_ring ( struct rhine_nic *rhn,
 	unsigned int i;
 
 	/* Allocate descriptors */
-	ring->desc = malloc_dma ( len, RHINE_RING_ALIGN );
+	ring->desc = malloc_phys ( len, RHINE_RING_ALIGN );
 	if ( ! ring->desc )
 		return -ENOMEM;
 
@@ -328,7 +328,7 @@ static void rhine_destroy_ring ( struct rhine_nic *rhn,
 	writel ( 0, rhn->regs + ring->reg );
 
 	/* Free descriptor ring */
-	free_dma ( ring->desc, len );
+	free_phys ( ring->desc, len );
 	ring->desc = NULL;
 	ring->prod = 0;
 	ring->cons = 0;
@@ -328,8 +328,8 @@ static int rtl818x_init_rx_ring(struct net80211_device *dev)
 	struct rtl818x_rx_desc *entry;
 	int i;
 
-	priv->rx_ring = malloc_dma(sizeof(*priv->rx_ring) * RTL818X_RX_RING_SIZE,
-				   RTL818X_RING_ALIGN);
+	priv->rx_ring = malloc_phys(sizeof(*priv->rx_ring) * RTL818X_RX_RING_SIZE,
+				    RTL818X_RING_ALIGN);
 	priv->rx_ring_dma = virt_to_bus(priv->rx_ring);
 	if (!priv->rx_ring) {
 		DBG("rtl818x %s: cannot allocate RX ring\n", dev->netdev->name);
@@ -364,7 +364,7 @@ static void rtl818x_free_rx_ring(struct net80211_device *dev)
 		priv->rx_buf[i] = NULL;
 	}
 
-	free_dma(priv->rx_ring, sizeof(*priv->rx_ring) * RTL818X_RX_RING_SIZE);
+	free_phys(priv->rx_ring, sizeof(*priv->rx_ring) * RTL818X_RX_RING_SIZE);
 	priv->rx_ring = NULL;
 }
 
@@ -373,8 +373,8 @@ static int rtl818x_init_tx_ring(struct net80211_device *dev)
 	struct rtl818x_priv *priv = dev->priv;
 	int i;
 
-	priv->tx_ring = malloc_dma(sizeof(*priv->tx_ring) * RTL818X_TX_RING_SIZE,
-				   RTL818X_RING_ALIGN);
+	priv->tx_ring = malloc_phys(sizeof(*priv->tx_ring) * RTL818X_TX_RING_SIZE,
+				    RTL818X_RING_ALIGN);
 	priv->tx_ring_dma = virt_to_bus(priv->tx_ring);
 	if (!priv->tx_ring) {
 		DBG("rtl818x %s: cannot allocate TX ring\n", dev->netdev->name);
@@ -402,7 +402,7 @@ static void rtl818x_free_tx_ring(struct net80211_device *dev)
 		priv->tx_buf[i] = NULL;
 	}
 
-	free_dma(priv->tx_ring, sizeof(*priv->tx_ring) * RTL818X_TX_RING_SIZE);
+	free_phys(priv->tx_ring, sizeof(*priv->tx_ring) * RTL818X_TX_RING_SIZE);
 	priv->tx_ring = NULL;
 }
 
@@ -37,7 +37,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
 
 void efx_hunt_free_special_buffer(void *buf, int bytes)
 {
-	free_dma(buf, bytes);
+	free_phys(buf, bytes);
 }
 
 static void *efx_hunt_alloc_special_buffer(int bytes,
@@ -50,7 +50,7 @@ static void *efx_hunt_alloc_special_buffer(int bytes,
 	 * buffer will be passed into an MC_CMD_INIT_*Q command to setup the
 	 * appropriate type of queue via MCDI.
 	 */
-	buffer = malloc_dma(bytes, EFX_BUF_ALIGN);
+	buffer = malloc_phys(bytes, EFX_BUF_ALIGN);
 	if (!buffer)
 		return NULL;
 
@@ -552,7 +552,7 @@ static int sis190_open(struct net_device *dev)
 	int rc;
 
 	/* Allocate TX ring */
-	tp->TxDescRing = malloc_dma(TX_RING_BYTES, RING_ALIGNMENT);
+	tp->TxDescRing = malloc_phys(TX_RING_BYTES, RING_ALIGNMENT);
 	if (!tp->TxDescRing) {
 		DBG("sis190: TX ring allocation failed\n");
 		rc = -ENOMEM;
@@ -561,7 +561,7 @@ static int sis190_open(struct net_device *dev)
 	tp->tx_dma = cpu_to_le32(virt_to_bus(tp->TxDescRing));
 
 	/* Allocate RX ring */
-	tp->RxDescRing = malloc_dma(RX_RING_BYTES, RING_ALIGNMENT);
+	tp->RxDescRing = malloc_phys(RX_RING_BYTES, RING_ALIGNMENT);
 	if (!tp->RxDescRing) {
 		DBG("sis190: RX ring allocation failed\n");
 		rc = -ENOMEM;
@@ -600,8 +600,8 @@ static void sis190_free(struct net_device *dev)
 	struct sis190_private *tp = netdev_priv(dev);
 	int i;
 
-	free_dma(tp->TxDescRing, TX_RING_BYTES);
-	free_dma(tp->RxDescRing, RX_RING_BYTES);
+	free_phys(tp->TxDescRing, TX_RING_BYTES);
+	free_phys(tp->RxDescRing, RX_RING_BYTES);
 
 	tp->TxDescRing = NULL;
 	tp->RxDescRing = NULL;
@@ -1699,7 +1699,7 @@ void skge_free(struct net_device *dev)
 	free(skge->tx_ring.start);
 	skge->tx_ring.start = NULL;
 
-	free_dma(skge->mem, RING_SIZE);
+	free_phys(skge->mem, RING_SIZE);
 	skge->mem = NULL;
 	skge->dma = 0;
 }
@@ -1714,7 +1714,7 @@ static int skge_up(struct net_device *dev)
 
 	DBG2(PFX "%s: enabling interface\n", dev->name);
 
-	skge->mem = malloc_dma(RING_SIZE, SKGE_RING_ALIGN);
+	skge->mem = malloc_phys(RING_SIZE, SKGE_RING_ALIGN);
 	skge->dma = virt_to_bus(skge->mem);
 	if (!skge->mem)
 		return -ENOMEM;
@@ -1112,10 +1112,10 @@ nomem:
 /* Free the le and ring buffers */
 static void sky2_free_rings(struct sky2_port *sky2)
 {
-	free_dma(sky2->rx_le, RX_LE_BYTES);
+	free_phys(sky2->rx_le, RX_LE_BYTES);
 	free(sky2->rx_ring);
 
-	free_dma(sky2->tx_le, TX_RING_SIZE * sizeof(struct sky2_tx_le));
+	free_phys(sky2->tx_le, TX_RING_SIZE * sizeof(struct sky2_tx_le));
 	free(sky2->tx_ring);
 
 	sky2->tx_le = NULL;
@@ -1137,7 +1137,7 @@ static int sky2_up(struct net_device *dev)
 	netdev_link_down(dev);
 
 	/* must be power of 2 */
-	sky2->tx_le = malloc_dma(TX_RING_SIZE * sizeof(struct sky2_tx_le), TX_RING_ALIGN);
+	sky2->tx_le = malloc_phys(TX_RING_SIZE * sizeof(struct sky2_tx_le), TX_RING_ALIGN);
 	sky2->tx_le_map = virt_to_bus(sky2->tx_le);
 	if (!sky2->tx_le)
 		goto err_out;
@@ -1149,7 +1149,7 @@ static int sky2_up(struct net_device *dev)
 
 	tx_init(sky2);
 
-	sky2->rx_le = malloc_dma(RX_LE_BYTES, RX_RING_ALIGN);
+	sky2->rx_le = malloc_phys(RX_LE_BYTES, RX_RING_ALIGN);
 	sky2->rx_le_map = virt_to_bus(sky2->rx_le);
 	if (!sky2->rx_le)
 		goto err_out;
@@ -2285,7 +2285,7 @@ static int sky2_probe(struct pci_device *pdev)
 	}
 
 	/* ring for status responses */
-	hw->st_le = malloc_dma(STATUS_LE_BYTES, STATUS_RING_ALIGN);
+	hw->st_le = malloc_phys(STATUS_LE_BYTES, STATUS_RING_ALIGN);
 	if (!hw->st_le)
 		goto err_out_iounmap;
 	hw->st_dma = virt_to_bus(hw->st_le);
@@ -2344,7 +2344,7 @@ err_out_free_netdev:
 	netdev_put(dev);
err_out_free_pci:
 	sky2_write8(hw, B0_CTST, CS_RST_SET);
-	free_dma(hw->st_le, STATUS_LE_BYTES);
+	free_phys(hw->st_le, STATUS_LE_BYTES);
err_out_iounmap:
 	iounmap((void *)hw->regs);
err_out_free_hw:
@@ -2373,7 +2373,7 @@ static void sky2_remove(struct pci_device *pdev)
 	sky2_write8(hw, B0_CTST, CS_RST_SET);
 	sky2_read8(hw, B0_CTST);
 
-	free_dma(hw->st_le, STATUS_LE_BYTES);
+	free_phys(hw->st_le, STATUS_LE_BYTES);
 
 	for (i = hw->ports-1; i >= 0; --i) {
 		netdev_nullify(hw->dev[i]);
@@ -42,7 +42,7 @@ void tg3_rx_prodring_fini(struct tg3_rx_prodring_set *tpr)
 { DBGP("%s\n", __func__);
 
 	if (tpr->rx_std) {
-		free_dma(tpr->rx_std, TG3_RX_STD_RING_BYTES(tp));
+		free_phys(tpr->rx_std, TG3_RX_STD_RING_BYTES(tp));
 		tpr->rx_std = NULL;
 	}
 }
@@ -55,7 +55,7 @@ static void tg3_free_consistent(struct tg3 *tp)
 { DBGP("%s\n", __func__);
 
 	if (tp->tx_ring) {
-		free_dma(tp->tx_ring, TG3_TX_RING_BYTES);
+		free_phys(tp->tx_ring, TG3_TX_RING_BYTES);
 		tp->tx_ring = NULL;
 	}
 
@@ -63,7 +63,7 @@ static void tg3_free_consistent(struct tg3 *tp)
 	tp->tx_buffers = NULL;
 
 	if (tp->rx_rcb) {
-		free_dma(tp->rx_rcb, TG3_RX_RCB_RING_BYTES(tp));
+		free_phys(tp->rx_rcb, TG3_RX_RCB_RING_BYTES(tp));
 		tp->rx_rcb_mapping = 0;
 		tp->rx_rcb = NULL;
 	}
@@ -71,7 +71,7 @@ static void tg3_free_consistent(struct tg3 *tp)
 	tg3_rx_prodring_fini(&tp->prodring);
 
 	if (tp->hw_status) {
-		free_dma(tp->hw_status, TG3_HW_STATUS_SIZE);
+		free_phys(tp->hw_status, TG3_HW_STATUS_SIZE);
 		tp->status_mapping = 0;
 		tp->hw_status = NULL;
 	}
@@ -87,7 +87,7 @@ int tg3_alloc_consistent(struct tg3 *tp)
 	struct tg3_hw_status *sblk;
 	struct tg3_rx_prodring_set *tpr = &tp->prodring;
 
-	tp->hw_status = malloc_dma(TG3_HW_STATUS_SIZE, TG3_DMA_ALIGNMENT);
+	tp->hw_status = malloc_phys(TG3_HW_STATUS_SIZE, TG3_DMA_ALIGNMENT);
 	if (!tp->hw_status) {
 		DBGC(tp->dev, "hw_status alloc failed\n");
 		goto err_out;
@@ -97,7 +97,7 @@ int tg3_alloc_consistent(struct tg3 *tp)
 	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
 	sblk = tp->hw_status;
 
-	tpr->rx_std = malloc_dma(TG3_RX_STD_RING_BYTES(tp), TG3_DMA_ALIGNMENT);
+	tpr->rx_std = malloc_phys(TG3_RX_STD_RING_BYTES(tp), TG3_DMA_ALIGNMENT);
 	if (!tpr->rx_std) {
 		DBGC(tp->dev, "rx prodring alloc failed\n");
 		goto err_out;
@@ -109,7 +109,7 @@ int tg3_alloc_consistent(struct tg3 *tp)
 	if (!tp->tx_buffers)
 		goto err_out;
 
-	tp->tx_ring = malloc_dma(TG3_TX_RING_BYTES, TG3_DMA_ALIGNMENT);
+	tp->tx_ring = malloc_phys(TG3_TX_RING_BYTES, TG3_DMA_ALIGNMENT);
 	if (!tp->tx_ring)
 		goto err_out;
 	tp->tx_desc_mapping = virt_to_bus(tp->tx_ring);
@@ -123,7 +123,7 @@ int tg3_alloc_consistent(struct tg3 *tp)
 
 	tp->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
 
-	tp->rx_rcb = malloc_dma(TG3_RX_RCB_RING_BYTES(tp), TG3_DMA_ALIGNMENT);
+	tp->rx_rcb = malloc_phys(TG3_RX_RCB_RING_BYTES(tp), TG3_DMA_ALIGNMENT);
 	if (!tp->rx_rcb)
 		goto err_out;
 	tp->rx_rcb_mapping = virt_to_bus(tp->rx_rcb);
@@ -541,7 +541,7 @@ static int tg3_test_dma(struct tg3 *tp)
 	u32 *buf;
 	int ret = 0;
 
-	buf = malloc_dma(TEST_BUFFER_SIZE, TG3_DMA_ALIGNMENT);
+	buf = malloc_phys(TEST_BUFFER_SIZE, TG3_DMA_ALIGNMENT);
 	if (!buf) {
 		ret = -ENOMEM;
 		goto out_nofree;
@@ -708,7 +708,7 @@ static int tg3_test_dma(struct tg3 *tp)
 	}
 
out:
-	free_dma(buf, TEST_BUFFER_SIZE);
+	free_phys(buf, TEST_BUFFER_SIZE);
out_nofree:
 	return ret;
 }
@@ -320,7 +320,8 @@ static int velocity_alloc_rings ( struct velocity_nic *vlc ) {
 	vlc->rx_prod = 0;
 	vlc->rx_cons = 0;
 	vlc->rx_commit = 0;
-	vlc->rx_ring = malloc_dma ( VELOCITY_RXDESC_SIZE, VELOCITY_RING_ALIGN );
+	vlc->rx_ring = malloc_phys ( VELOCITY_RXDESC_SIZE,
+				     VELOCITY_RING_ALIGN );
 	if ( ! vlc->rx_ring )
 		return -ENOMEM;
 
@@ -332,7 +333,8 @@ static int velocity_alloc_rings ( struct velocity_nic *vlc ) {
 	/* Allocate TX descriptor ring */
 	vlc->tx_prod = 0;
 	vlc->tx_cons = 0;
-	vlc->tx_ring = malloc_dma ( VELOCITY_TXDESC_SIZE, VELOCITY_RING_ALIGN );
+	vlc->tx_ring = malloc_phys ( VELOCITY_TXDESC_SIZE,
+				     VELOCITY_RING_ALIGN );
 	if ( ! vlc->tx_ring ) {
 		rc = -ENOMEM;
 		goto err_tx_alloc;
@@ -356,7 +358,7 @@ static int velocity_alloc_rings ( struct velocity_nic *vlc ) {
 	return 0;
 
err_tx_alloc:
-	free_dma ( vlc->rx_ring, VELOCITY_RXDESC_SIZE );
+	free_phys ( vlc->rx_ring, VELOCITY_RXDESC_SIZE );
 	return rc;
 }
 
@@ -482,7 +484,7 @@ static void velocity_close ( struct net_device *netdev ) {
 	writew ( 0, vlc->regs + VELOCITY_RXDESCNUM );
 
 	/* Destroy RX ring */
-	free_dma ( vlc->rx_ring, VELOCITY_RXDESC_SIZE );
+	free_phys ( vlc->rx_ring, VELOCITY_RXDESC_SIZE );
 	vlc->rx_ring = NULL;
 	vlc->rx_prod = 0;
 	vlc->rx_cons = 0;
@@ -499,7 +501,7 @@ static void velocity_close ( struct net_device *netdev ) {
 	writew ( 0, vlc->regs + VELOCITY_TXDESCNUM );
 
 	/* Destroy TX ring */
-	free_dma ( vlc->tx_ring, VELOCITY_TXDESC_SIZE );
+	free_phys ( vlc->tx_ring, VELOCITY_TXDESC_SIZE );
 	vlc->tx_ring = NULL;
 	vlc->tx_prod = 0;
 	vlc->tx_cons = 0;
@@ -465,7 +465,8 @@ static int vmxnet3_open ( struct net_device *netdev ) {
 	int rc;
 
 	/* Allocate DMA areas */
-	vmxnet->dma = malloc_dma ( sizeof ( *vmxnet->dma ), VMXNET3_DMA_ALIGN );
+	vmxnet->dma = malloc_phys ( sizeof ( *vmxnet->dma ),
+				    VMXNET3_DMA_ALIGN );
 	if ( ! vmxnet->dma ) {
 		DBGC ( vmxnet, "VMXNET3 %p could not allocate DMA area\n",
 		       vmxnet );
@@ -542,7 +543,7 @@ static int vmxnet3_open ( struct net_device *netdev ) {
 err_activate:
 	vmxnet3_flush_tx ( netdev );
 	vmxnet3_flush_rx ( netdev );
-	free_dma ( vmxnet->dma, sizeof ( *vmxnet->dma ) );
+	free_phys ( vmxnet->dma, sizeof ( *vmxnet->dma ) );
err_alloc_dma:
 	return rc;
 }
@@ -559,7 +560,7 @@ static void vmxnet3_close ( struct net_device *netdev ) {
 	vmxnet3_command ( vmxnet, VMXNET3_CMD_RESET_DEV );
 	vmxnet3_flush_tx ( netdev );
 	vmxnet3_flush_rx ( netdev );
-	free_dma ( vmxnet->dma, sizeof ( *vmxnet->dma ) );
+	free_phys ( vmxnet->dma, sizeof ( *vmxnet->dma ) );
 }
 
 /** vmxnet3 net device operations */
@@ -624,10 +624,10 @@ __vxge_hw_ring_create(struct __vxge_hw_virtualpath *vpath,
 	hldev = vpath->hldev;
 	vp_id = vpath->vp_id;
 
-	ring->rxdl = malloc_dma(sizeof(struct __vxge_hw_ring_block),
+	ring->rxdl = malloc_phys(sizeof(struct __vxge_hw_ring_block),
 				sizeof(struct __vxge_hw_ring_block));
 	if (!ring->rxdl) {
-		vxge_debug(VXGE_ERR, "%s:%d malloc_dma error\n",
+		vxge_debug(VXGE_ERR, "%s:%d malloc_phys error\n",
 			__func__, __LINE__);
 		status = VXGE_HW_ERR_OUT_OF_MEMORY;
 		goto exit;
@@ -667,7 +667,7 @@ enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_ring *ring)
 	}
 
 	if (ring->rxdl) {
-		free_dma(ring->rxdl, sizeof(struct __vxge_hw_ring_block));
+		free_phys(ring->rxdl, sizeof(struct __vxge_hw_ring_block));
 		ring->rxdl = NULL;
 	}
 	ring->rxd_offset = 0;
@@ -826,10 +826,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_virtualpath *vpath,
 	fifo->tx_intr_num = (vpath->vp_id * VXGE_HW_MAX_INTR_PER_VP)
 				+ VXGE_HW_VPATH_INTR_TX;
 
-	fifo->txdl = malloc_dma(sizeof(struct vxge_hw_fifo_txd)
+	fifo->txdl = malloc_phys(sizeof(struct vxge_hw_fifo_txd)
 				* fifo->depth, fifo->depth);
 	if (!fifo->txdl) {
-		vxge_debug(VXGE_ERR, "%s:%d malloc_dma error\n",
+		vxge_debug(VXGE_ERR, "%s:%d malloc_phys error\n",
 			__func__, __LINE__);
 		return VXGE_HW_ERR_OUT_OF_MEMORY;
 	}
@@ -846,7 +846,7 @@ enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_fifo *fifo)
 	vxge_trace();
 
 	if (fifo->txdl)
-		free_dma(fifo->txdl,
+		free_phys(fifo->txdl,
 			sizeof(struct vxge_hw_fifo_txd) * fifo->depth);
 
 	fifo->txdl = NULL;
@@ -565,8 +565,8 @@ static int ehci_ring_alloc ( struct ehci_device *ehci,
 	}
 
 	/* Allocate queue head */
-	ring->head = malloc_dma ( sizeof ( *ring->head ),
-				  ehci_align ( sizeof ( *ring->head ) ) );
+	ring->head = malloc_phys ( sizeof ( *ring->head ),
+				   ehci_align ( sizeof ( *ring->head ) ) );
 	if ( ! ring->head ) {
 		rc = -ENOMEM;
 		goto err_alloc_queue;
@@ -579,7 +579,7 @@ static int ehci_ring_alloc ( struct ehci_device *ehci,
 
 	/* Allocate transfer descriptors */
 	len = ( EHCI_RING_COUNT * sizeof ( ring->desc[0] ) );
-	ring->desc = malloc_dma ( len, sizeof ( ring->desc[0] ) );
+	ring->desc = malloc_phys ( len, sizeof ( ring->desc[0] ) );
 	if ( ! ring->desc ) {
 		rc = -ENOMEM;
 		goto err_alloc_desc;
@@ -607,10 +607,10 @@ static int ehci_ring_alloc ( struct ehci_device *ehci,
 	return 0;
 
err_unreachable_desc:
-	free_dma ( ring->desc, len );
+	free_phys ( ring->desc, len );
err_alloc_desc:
err_unreachable_queue:
-	free_dma ( ring->head, sizeof ( *ring->head ) );
+	free_phys ( ring->head, sizeof ( *ring->head ) );
err_alloc_queue:
 	free ( ring->iobuf );
err_alloc_iobuf:
@@ -631,10 +631,11 @@ static void ehci_ring_free ( struct ehci_ring *ring ) {
 		assert ( ring->iobuf[i] == NULL );
 
 	/* Free transfer descriptors */
-	free_dma ( ring->desc, ( EHCI_RING_COUNT * sizeof ( ring->desc[0] ) ) );
+	free_phys ( ring->desc, ( EHCI_RING_COUNT *
+				  sizeof ( ring->desc[0] ) ) );
 
 	/* Free queue head */
-	free_dma ( ring->head, sizeof ( *ring->head ) );
+	free_phys ( ring->head, sizeof ( *ring->head ) );
 
 	/* Free I/O buffers */
 	free ( ring->iobuf );
@@ -1787,8 +1788,8 @@ static int ehci_bus_open ( struct usb_bus *bus ) {
 	assert ( list_empty ( &ehci->periodic ) );
 
 	/* Allocate and initialise asynchronous queue head */
-	ehci->head = malloc_dma ( sizeof ( *ehci->head ),
-				  ehci_align ( sizeof ( *ehci->head ) ) );
+	ehci->head = malloc_phys ( sizeof ( *ehci->head ),
+				   ehci_align ( sizeof ( *ehci->head ) ) );
 	if ( ! ehci->head ) {
 		rc = -ENOMEM;
 		goto err_alloc_head;
@@ -1816,7 +1817,7 @@ static int ehci_bus_open ( struct usb_bus *bus ) {
 	/* Allocate periodic frame list */
 	frames = EHCI_PERIODIC_FRAMES ( ehci->flsize );
 	len = ( frames * sizeof ( ehci->frame[0] ) );
-	ehci->frame = malloc_dma ( len, EHCI_PAGE_ALIGN );
+	ehci->frame = malloc_phys ( len, EHCI_PAGE_ALIGN );
 	if ( ! ehci->frame ) {
 		rc = -ENOMEM;
 		goto err_alloc_frame;
@@ -1836,10 +1837,10 @@ static int ehci_bus_open ( struct usb_bus *bus ) {
 
 	ehci_stop ( ehci );
err_unreachable_frame:
-	free_dma ( ehci->frame, len );
+	free_phys ( ehci->frame, len );
err_alloc_frame:
err_ctrldssegment:
-	free_dma ( ehci->head, sizeof ( *ehci->head ) );
+	free_phys ( ehci->head, sizeof ( *ehci->head ) );
err_alloc_head:
 	return rc;
 }
@@ -1861,10 +1862,10 @@ static void ehci_bus_close ( struct usb_bus *bus ) {
 	ehci_stop ( ehci );
 
 	/* Free periodic frame list */
-	free_dma ( ehci->frame, ( frames * sizeof ( ehci->frame[0] ) ) );
+	free_phys ( ehci->frame, ( frames * sizeof ( ehci->frame[0] ) ) );
 
 	/* Free asynchronous schedule */
-	free_dma ( ehci->head, sizeof ( *ehci->head ) );
+	free_phys ( ehci->head, sizeof ( *ehci->head ) );
 }
 
 /**
@@ -179,7 +179,7 @@ static int uhci_ring_alloc ( struct uhci_ring *ring ) {
 	memset ( ring, 0, sizeof ( *ring ) );
 
 	/* Allocate queue head */
-	ring->head = malloc_dma ( sizeof ( *ring->head ), UHCI_ALIGN );
+	ring->head = malloc_phys ( sizeof ( *ring->head ), UHCI_ALIGN );
 	if ( ! ring->head ) {
 		rc = -ENOMEM;
 		goto err_alloc;
@@ -194,7 +194,7 @@ static int uhci_ring_alloc ( struct uhci_ring *ring ) {
 	return 0;
 
err_unreachable:
-	free_dma ( ring->head, sizeof ( *ring->head ) );
+	free_phys ( ring->head, sizeof ( *ring->head ) );
err_alloc:
 	return rc;
 }
@@ -213,7 +213,7 @@ static void uhci_ring_free ( struct uhci_ring *ring ) {
 		assert ( ring->xfer[i] == NULL );
 
 	/* Free queue head */
-	free_dma ( ring->head, sizeof ( *ring->head ) );
+	free_phys ( ring->head, sizeof ( *ring->head ) );
 }
 
 /**
@@ -263,7 +263,7 @@ static int uhci_enqueue ( struct uhci_ring *ring, struct io_buffer *iobuf,
 
 	/* Allocate transfer descriptors */
 	len = ( count * sizeof ( xfer->desc[0] ) );
-	xfer->desc = malloc_dma ( len, UHCI_ALIGN );
+	xfer->desc = malloc_phys ( len, UHCI_ALIGN );
 	if ( ! xfer->desc ) {
 		rc = -ENOMEM;
 		goto err_alloc_desc;
@@ -299,7 +299,7 @@ static int uhci_enqueue ( struct uhci_ring *ring, struct io_buffer *iobuf,
 	return 0;
 
err_unreachable_desc:
-	free_dma ( xfer->desc, len );
+	free_phys ( xfer->desc, len );
err_alloc_desc:
 	free ( xfer );
err_alloc_xfer:
@@ -377,7 +377,7 @@ static struct io_buffer * uhci_dequeue ( struct uhci_ring *ring ) {
 
 	/* Free transfer descriptors */
 	len = ( xfer->prod * sizeof ( xfer->desc[0] ) );
-	free_dma ( xfer->desc, len );
+	free_phys ( xfer->desc, len );
 
 	/* Free transfer */
 	free ( xfer );
@@ -1312,7 +1312,7 @@ static int uhci_bus_open ( struct usb_bus *bus ) {
 	assert ( list_empty ( &uhci->periodic ) );
 
 	/* Allocate and initialise asynchronous queue head */
-	uhci->head = malloc_dma ( sizeof ( *uhci->head ), UHCI_ALIGN );
+	uhci->head = malloc_phys ( sizeof ( *uhci->head ), UHCI_ALIGN );
 	if ( ! uhci->head ) {
 		rc = -ENOMEM;
 		goto err_alloc_head;
@@ -1324,8 +1324,8 @@ static int uhci_bus_open ( struct usb_bus *bus ) {
 	uhci_async_schedule ( uhci );
 
 	/* Allocate periodic frame list */
-	uhci->frame = malloc_dma ( sizeof ( *uhci->frame ),
-				   sizeof ( *uhci->frame ) );
+	uhci->frame = malloc_phys ( sizeof ( *uhci->frame ),
+				    sizeof ( *uhci->frame ) );
 	if ( ! uhci->frame ) {
 		rc = -ENOMEM;
 		goto err_alloc_frame;
@@ -1343,10 +1343,10 @@ static int uhci_bus_open ( struct usb_bus *bus ) {
 
 	uhci_stop ( uhci );
err_unreachable_frame:
-	free_dma ( uhci->frame, sizeof ( *uhci->frame ) );
+	free_phys ( uhci->frame, sizeof ( *uhci->frame ) );
err_alloc_frame:
err_unreachable_head:
-	free_dma ( uhci->head, sizeof ( *uhci->head ) );
+	free_phys ( uhci->head, sizeof ( *uhci->head ) );
err_alloc_head:
 	return rc;
 }
@@ -1367,10 +1367,10 @@ static void uhci_bus_close ( struct usb_bus *bus ) {
 	uhci_stop ( uhci );
 
 	/* Free periodic frame list */
-	free_dma ( uhci->frame, sizeof ( *uhci->frame ) );
+	free_phys ( uhci->frame, sizeof ( *uhci->frame ) );
 
 	/* Free asynchronous schedule */
-	free_dma ( uhci->head, sizeof ( *uhci->head ) );
+	free_phys ( uhci->head, sizeof ( *uhci->head ) );
 }
 
 /**
@@ -919,7 +919,7 @@ static int xhci_dcbaa_alloc ( struct xhci_device *xhci ) {
 	 * with a minimum of 64 bytes).
 	 */
 	len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa[0] ) );
-	xhci->dcbaa = malloc_dma ( len, xhci_align ( len ) );
+	xhci->dcbaa = malloc_phys ( len, xhci_align ( len ) );
 	if ( ! xhci->dcbaa ) {
 		DBGC ( xhci, "XHCI %s could not allocate DCBAA\n", xhci->name );
 		rc = -ENOMEM;
@@ -938,7 +938,7 @@ static int xhci_dcbaa_alloc ( struct xhci_device *xhci ) {
 	return 0;
 
err_writeq:
-	free_dma ( xhci->dcbaa, len );
+	free_phys ( xhci->dcbaa, len );
err_alloc:
 	return rc;
 }
@@ -961,7 +961,7 @@ static void xhci_dcbaa_free ( struct xhci_device *xhci ) {
 
 	/* Free DCBAA */
 	len = ( ( xhci->slots + 1 ) * sizeof ( xhci->dcbaa[0] ) );
-	free_dma ( xhci->dcbaa, len );
+	free_phys ( xhci->dcbaa, len );
 }
 
 /******************************************************************************
@@ -1002,7 +1002,7 @@ static int xhci_scratchpad_alloc ( struct xhci_device *xhci ) {
 	/* Allocate scratchpad array */
 	array_len = ( xhci->scratchpads * sizeof ( xhci->scratchpad_array[0] ));
 	xhci->scratchpad_array =
-		malloc_dma ( array_len, xhci_align ( array_len ) );
+		malloc_phys ( array_len, xhci_align ( array_len ) );
 	if ( ! xhci->scratchpad_array ) {
 		DBGC ( xhci, "XHCI %s could not allocate scratchpad buffer "
 		       "array\n", xhci->name );
@@ -1027,7 +1027,7 @@ static int xhci_scratchpad_alloc ( struct xhci_device *xhci ) {
 	       ( virt_to_phys ( xhci->scratchpad_array ) + array_len ) );
 	return 0;
 
-	free_dma ( xhci->scratchpad_array, array_len );
+	free_phys ( xhci->scratchpad_array, array_len );
err_alloc_array:
 	ufree ( xhci->scratchpad );
err_alloc:
@@ -1052,7 +1052,7 @@ static void xhci_scratchpad_free ( struct xhci_device *xhci ) {
 
 	/* Free scratchpad array */
 	array_len = ( xhci->scratchpads * sizeof ( xhci->scratchpad_array[0] ));
-	free_dma ( xhci->scratchpad_array, array_len );
+	free_phys ( xhci->scratchpad_array, array_len );
 
 	/* Free scratchpads */
 	ufree ( xhci->scratchpad );
@@ -1202,7 +1202,7 @@ static int xhci_ring_alloc ( struct xhci_device *xhci,
 	}
 
 	/* Allocate TRBs */
-	ring->trb = malloc_dma ( ring->len, xhci_align ( ring->len ) );
+	ring->trb = malloc_phys ( ring->len, xhci_align ( ring->len ) );
 	if ( ! ring->trb ) {
 		rc = -ENOMEM;
 		goto err_alloc_trb;
@@ -1218,7 +1218,7 @@ static int xhci_ring_alloc ( struct xhci_device *xhci,
 
 	return 0;
 
-	free_dma ( ring->trb, ring->len );
+	free_phys ( ring->trb, ring->len );
err_alloc_trb:
 	free ( ring->iobuf );
err_alloc_iobuf:
@@ -1256,7 +1256,7 @@ static void xhci_ring_free ( struct xhci_trb_ring *ring ) {
 		assert ( ring->iobuf[i] == NULL );
 
 	/* Free TRBs */
-	free_dma ( ring->trb, ring->len );
+	free_phys ( ring->trb, ring->len );
 
 	/* Free I/O buffers */
 	free ( ring->iobuf );
@@ -1469,7 +1469,7 @@ static int xhci_event_alloc ( struct xhci_device *xhci ) {
 	/* Allocate event ring */
 	count = ( 1 << XHCI_EVENT_TRBS_LOG2 );
 	len = ( count * sizeof ( event->trb[0] ) );
-	event->trb = malloc_dma ( len, xhci_align ( len ) );
+	event->trb = malloc_phys ( len, xhci_align ( len ) );
 	if ( ! event->trb ) {
 		rc = -ENOMEM;
 		goto err_alloc_trb;
@@ -1477,8 +1477,8 @@ static int xhci_event_alloc ( struct xhci_device *xhci ) {
 	memset ( event->trb, 0, len );
 
 	/* Allocate event ring segment table */
-	event->segment = malloc_dma ( sizeof ( event->segment[0] ),
-				      xhci_align ( sizeof (event->segment[0])));
+	event->segment = malloc_phys ( sizeof ( event->segment[0] ),
+				       xhci_align ( sizeof(event->segment[0])));
 	if ( ! event->segment ) {
 		rc = -ENOMEM;
 		goto err_alloc_segment;
@@ -1508,9 +1508,9 @@ static int xhci_event_alloc ( struct xhci_device *xhci ) {
err_writeq_erstba:
 	xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERDP ( 0 ) );
err_writeq_erdp:
-	free_dma ( event->trb, len );
+	free_phys ( event->trb, len );
err_alloc_segment:
-	free_dma ( event->segment, sizeof ( event->segment[0] ) );
+	free_phys ( event->segment, sizeof ( event->segment[0] ) );
err_alloc_trb:
 	return rc;
 }
@@ -1531,12 +1531,12 @@ static void xhci_event_free ( struct xhci_device *xhci ) {
 	xhci_writeq ( xhci, 0, xhci->run + XHCI_RUN_ERDP ( 0 ) );
 
 	/* Free event ring segment table */
-	free_dma ( event->segment, sizeof ( event->segment[0] ) );
+	free_phys ( event->segment, sizeof ( event->segment[0] ) );
 
 	/* Free event ring */
 	count = ( 1 << XHCI_EVENT_TRBS_LOG2 );
 	len = ( count * sizeof ( event->trb[0] ) );
-	free_dma ( event->trb, len );
+	free_phys ( event->trb, len );
 }
 
 /**
@@ -1948,7 +1948,7 @@ static int xhci_context ( struct xhci_device *xhci, struct xhci_slot *slot,
 
 	/* Allocate an input context */
 	len = xhci_input_context_offset ( xhci, XHCI_CTX_END );
-	input = malloc_dma ( len, xhci_align ( len ) );
+	input = malloc_phys ( len, xhci_align ( len ) );
 	if ( ! input ) {
 		rc = -ENOMEM;
 		goto err_alloc;
@@ -1969,7 +1969,7 @@ static int xhci_context ( struct xhci_device *xhci, struct xhci_slot *slot,
 		goto err_command;
 
err_command:
-	free_dma ( input, len );
+	free_phys ( input, len );
err_alloc:
 	return rc;
 }
@@ -2693,7 +2693,7 @@ static int xhci_device_open ( struct usb_device *usb ) {
 
 	/* Allocate a device context */
 	len = xhci_device_context_offset ( xhci, XHCI_CTX_END );
-	slot->context = malloc_dma ( len, xhci_align ( len ) );
+	slot->context = malloc_phys ( len, xhci_align ( len ) );
 	if ( ! slot->context ) {
 		rc = -ENOMEM;
 		goto err_alloc_context;
@@ -2710,7 +2710,7 @@ static int xhci_device_open ( struct usb_device *usb ) {
 	return 0;
 
 	xhci->dcbaa[id] = 0;
-	free_dma ( slot->context, len );
+	free_phys ( slot->context, len );
err_alloc_context:
 	xhci->slot[id] = NULL;
 	free ( slot );
@@ -2750,7 +2750,7 @@ static void xhci_device_close ( struct usb_device *usb ) {
 
 	/* Free slot */
 	if ( slot->context ) {
-		free_dma ( slot->context, len );
+		free_phys ( slot->context, len );
 		xhci->dcbaa[id] = 0;
 	}
 	xhci->slot[id] = NULL;
@@ -14,7 +14,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
 /*
  * Prototypes for the standard functions (malloc() et al) are in
  * stdlib.h. Include <ipxe/malloc.h> only if you need the
- * non-standard functions, such as malloc_dma().
+ * non-standard functions, such as malloc_phys().
  *
  */
 #include <stdlib.h>
@@ -32,20 +32,18 @@ extern void mpopulate ( void *start, size_t len );
 extern void mdumpfree ( void );
 
 /**
- * Allocate memory for DMA
+ * Allocate memory with specified physical alignment and offset
 *
 * @v size		Requested size
 * @v align		Physical alignment
 * @v offset		Offset from physical alignment
 * @ret ptr		Memory, or NULL
 *
- * Allocates physically-aligned memory for DMA.
- *
 * @c align must be a power of two. @c size may not be zero.
 */
-static inline void * __malloc malloc_dma_offset ( size_t size,
-						  size_t phys_align,
-						  size_t offset ) {
+static inline void * __malloc malloc_phys_offset ( size_t size,
+						   size_t phys_align,
+						   size_t offset ) {
 	void * ptr = alloc_memblock ( size, phys_align, offset );
 	if ( ptr && size )
 		VALGRIND_MALLOCLIKE_BLOCK ( ptr, size, 0, 0 );
@@ -53,32 +51,30 @@ static inline void * __malloc malloc_dma_offset ( size_t size,
 }
 
 /**
- * Allocate memory for DMA
+ * Allocate memory with specified physical alignment
 *
 * @v size		Requested size
 * @v align		Physical alignment
 * @ret ptr		Memory, or NULL
 *
- * Allocates physically-aligned memory for DMA.
- *
 * @c align must be a power of two. @c size may not be zero.
 */
-static inline void * __malloc malloc_dma ( size_t size, size_t phys_align ) {
-	return malloc_dma_offset ( size, phys_align, 0 );
+static inline void * __malloc malloc_phys ( size_t size, size_t phys_align ) {
+	return malloc_phys_offset ( size, phys_align, 0 );
 }
 
 /**
- * Free memory allocated with malloc_dma()
+ * Free memory allocated with malloc_phys()
 *
- * @v ptr		Memory allocated by malloc_dma(), or NULL
- * @v size		Size of memory, as passed to malloc_dma()
+ * @v ptr		Memory allocated by malloc_phys(), or NULL
+ * @v size		Size of memory, as passed to malloc_phys()
 *
- * Memory allocated with malloc_dma() can only be freed with
- * free_dma(); it cannot be freed with the standard free().
+ * Memory allocated with malloc_phys() can only be freed with
+ * free_phys(); it cannot be freed with the standard free().
 *
 * If @c ptr is NULL, no action is taken.
 */
-static inline void free_dma ( void *ptr, size_t size ) {
+static inline void free_phys ( void *ptr, size_t size ) {
 	VALGRIND_FREELIKE_BLOCK ( ptr, 0 );
 	free_memblock ( ptr, size );
 }
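For context on the renamed interface above, here is a minimal usage sketch; it is illustrative only, and the structure name, size and alignment are assumptions, not taken from the patch. It follows the rules stated in the header: the alignment passed to malloc_phys() must be a power of two, and memory obtained from it must be released with free_phys() using the same size, never with the standard free().

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <ipxe/malloc.h>
#include <ipxe/io.h>

/* Hypothetical descriptor ring used only for illustration */
struct demo_ring {
	uint64_t desc[64];
};

static struct demo_ring *demo;

static int demo_ring_alloc ( void ) {
	/* Power-of-two physical alignment, as required by malloc_phys() */
	demo = malloc_phys ( sizeof ( *demo ), sizeof ( *demo ) );
	if ( ! demo )
		return -ENOMEM;
	memset ( demo, 0, sizeof ( *demo ) );
	/* Hardware would typically be given the bus address of the ring */
	DBG ( "demo ring at %#08lx\n", virt_to_bus ( demo ) );
	return 0;
}

static void demo_ring_free ( void ) {
	/* Size must match the size originally passed to malloc_phys() */
	free_phys ( demo, sizeof ( *demo ) );
	demo = NULL;
}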
@@ -434,7 +434,7 @@ int vmbus_open ( struct vmbus_device *vmdev,
 	len = ( sizeof ( *vmdev->out ) + out_len +
 		sizeof ( *vmdev->in ) + in_len );
 	assert ( ( len % PAGE_SIZE ) == 0 );
-	ring = malloc_dma ( len, PAGE_SIZE );
+	ring = malloc_phys ( len, PAGE_SIZE );
 	if ( ! ring ) {
 		rc = -ENOMEM;
 		goto err_alloc_ring;
@@ -509,7 +509,7 @@ int vmbus_open ( struct vmbus_device *vmdev,
err_post_message:
 	vmbus_gpadl_teardown ( vmdev, vmdev->gpadl );
err_establish:
-	free_dma ( ring, len );
+	free_phys ( ring, len );
err_alloc_ring:
 	free ( packet );
err_alloc_packet:
@@ -555,7 +555,7 @@ void vmbus_close ( struct vmbus_device *vmdev ) {
 	/* Free ring buffer */
 	len = ( sizeof ( *vmdev->out ) + vmdev->out_len +
 		sizeof ( *vmdev->in ) + vmdev->in_len );
-	free_dma ( vmdev->out, len );
+	free_phys ( vmdev->out, len );
 	vmdev->out = NULL;
 	vmdev->in = NULL;
 