mirror of https://github.com/ipxe/ipxe.git

[virtio] Update driver to use DMA API

Signed-off-by: Aaron Young <aaron.young@oracle.com>

branch: pull/516/head
parent: 2265a65191
commit: f24a2794e1
@@ -17,37 +17,47 @@
 #include "ipxe/io.h"
 #include "ipxe/iomap.h"
 #include "ipxe/pci.h"
+#include "ipxe/dma.h"
 #include "ipxe/reboot.h"
 #include "ipxe/virtio-pci.h"
 #include "ipxe/virtio-ring.h"
 
-static int vp_alloc_vq(struct vring_virtqueue *vq, u16 num)
+static int vp_alloc_vq(struct vring_virtqueue *vq, u16 num, size_t header_size)
 {
-    size_t queue_size = PAGE_MASK + vring_size(num);
+    size_t ring_size = PAGE_MASK + vring_size(num);
     size_t vdata_size = num * sizeof(void *);
+    size_t queue_size = ring_size + vdata_size + header_size;
 
-    vq->queue = zalloc(queue_size + vdata_size);
+    vq->queue = dma_alloc(vq->dma, &vq->map, queue_size, queue_size);
     if (!vq->queue) {
        return -ENOMEM;
     }
 
+    memset ( vq->queue, 0, queue_size );
+    vq->queue_size = queue_size;
+
     /* vdata immediately follows the ring */
-    vq->vdata = (void **)(vq->queue + queue_size);
+    vq->vdata = (void **)(vq->queue + ring_size);
+
+    /* empty header immediately follows vdata */
+    vq->empty_header = (struct virtio_net_hdr_modern *)(vq->queue + ring_size + vdata_size);
 
     return 0;
 }
 
 void vp_free_vq(struct vring_virtqueue *vq)
 {
-    if (vq->queue) {
-        free(vq->queue);
+    if (vq->queue && vq->queue_size) {
+        dma_free(&vq->map, vq->queue, vq->queue_size);
         vq->queue = NULL;
         vq->vdata = NULL;
+        vq->queue_size = 0;
     }
 }
 
 int vp_find_vq(unsigned int ioaddr, int queue_index,
-               struct vring_virtqueue *vq)
+               struct vring_virtqueue *vq, struct dma_device *dma_dev,
+               size_t header_size)
 {
     struct vring * vr = &vq->vring;
     u16 num;
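
The rewritten vp_alloc_vq() above replaces zalloc() with a single DMA-coherent allocation that holds the descriptor ring, the vdata pointer array, and a dummy virtio-net header back to back. Below is a minimal stand-alone sketch of that layout arithmetic; malloc() stands in for iPXE's dma_alloc(), and vring_size_approx() is an invented approximation of the real vring_size(), so both are assumptions for illustration only.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_MASK 0xfff  /* 4 KiB pages, as in the PAGE_MASK padding above */

/* Invented stand-in for vring_size(num): descriptors + avail + used rings. */
static size_t vring_size_approx(unsigned int num) {
    return (16 * num) + (6 + 2 * num) + (6 + 8 * num);
}

int main(void) {
    unsigned int num = 256;   /* queue depth */
    size_t header_size = 12;  /* e.g. sizeof(struct virtio_net_hdr_modern) */
    size_t ring_size = PAGE_MASK + vring_size_approx(num);
    size_t vdata_size = num * sizeof(void *);
    size_t queue_size = ring_size + vdata_size + header_size;

    unsigned char *queue = malloc(queue_size);  /* dma_alloc() in the driver */
    if (!queue)
        return 1;
    memset(queue, 0, queue_size);  /* dma_alloc() does not zero the block */

    /* vdata immediately follows the ring; the header follows vdata */
    void **vdata = (void **)(queue + ring_size);
    void *empty_header = queue + ring_size + vdata_size;

    printf("ring %p  vdata %p  header %p  total %zu bytes\n",
           (void *)queue, (void *)vdata, empty_header, queue_size);
    free(queue);
    return 0;
}
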
@@ -73,9 +83,10 @@ int vp_find_vq(unsigned int ioaddr, int queue_index,
     }
 
     vq->queue_index = queue_index;
+    vq->dma = dma_dev;
 
     /* initialize the queue */
-    rc = vp_alloc_vq(vq, num);
+    rc = vp_alloc_vq(vq, num, header_size);
     if (rc) {
         DBG("VIRTIO-PCI ERROR: failed to allocate queue memory\n");
         return rc;
@@ -87,8 +98,7 @@ int vp_find_vq(unsigned int ioaddr, int queue_index,
      * NOTE: vr->desc is initialized by vring_init()
      */
 
-    outl((unsigned long)virt_to_phys(vr->desc) >> PAGE_SHIFT,
-         ioaddr + VIRTIO_PCI_QUEUE_PFN);
+    outl(dma(&vq->map, vr->desc) >> PAGE_SHIFT, ioaddr + VIRTIO_PCI_QUEUE_PFN);
 
     return num;
 }
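
For legacy devices the ring's location is still programmed as a page frame number; what changes is that the PFN is now derived from the bus address returned by dma() rather than from virt_to_phys() (the two coincide only when no IOMMU sits in between). A tiny sketch of that derivation, with an illustrative address and the usual 4 KiB PAGE_SHIFT assumed:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12  /* 4 KiB pages, as on x86 */

int main(void) {
    uint64_t ring_busaddr = 0x12345000ULL;  /* illustrative, page-aligned */
    uint32_t pfn = (uint32_t)(ring_busaddr >> PAGE_SHIFT);
    printf("QUEUE_PFN = 0x%x\n", pfn);  /* value the register write carries */
    return 0;
}
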
@@ -348,7 +358,8 @@ void vpm_notify(struct virtio_pci_modern_device *vdev,
 }
 
 int vpm_find_vqs(struct virtio_pci_modern_device *vdev,
-                 unsigned nvqs, struct vring_virtqueue *vqs)
+                 unsigned nvqs, struct vring_virtqueue *vqs,
+                 struct dma_device *dma_dev, size_t header_size)
 {
     unsigned i;
     struct vring_virtqueue *vq;
@@ -392,11 +403,12 @@ int vpm_find_vqs(struct virtio_pci_modern_device *vdev,
 
     vq = &vqs[i];
     vq->queue_index = i;
+    vq->dma = dma_dev;
 
     /* get offset of notification word for this vq */
     off = vpm_ioread16(vdev, &vdev->common, COMMON_OFFSET(queue_notify_off));
 
-    err = vp_alloc_vq(vq, size);
+    err = vp_alloc_vq(vq, size, header_size);
     if (err) {
         DBG("VIRTIO-PCI %p: failed to allocate queue memory\n", vdev);
         return err;
@@ -406,13 +418,16 @@ int vpm_find_vqs(struct virtio_pci_modern_device *vdev,
     /* activate the queue */
     vpm_iowrite16(vdev, &vdev->common, size, COMMON_OFFSET(queue_size));
 
-    vpm_iowrite64(vdev, &vdev->common, virt_to_phys(vq->vring.desc),
+    vpm_iowrite64(vdev, &vdev->common,
+                  dma(&vq->map, vq->vring.desc),
                   COMMON_OFFSET(queue_desc_lo),
                   COMMON_OFFSET(queue_desc_hi));
-    vpm_iowrite64(vdev, &vdev->common, virt_to_phys(vq->vring.avail),
+    vpm_iowrite64(vdev, &vdev->common,
+                  dma(&vq->map, vq->vring.avail),
                   COMMON_OFFSET(queue_avail_lo),
                   COMMON_OFFSET(queue_avail_hi));
-    vpm_iowrite64(vdev, &vdev->common, virt_to_phys(vq->vring.used),
+    vpm_iowrite64(vdev, &vdev->common,
+                  dma(&vq->map, vq->vring.used),
                   COMMON_OFFSET(queue_used_lo),
                   COMMON_OFFSET(queue_used_hi));
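
vpm_iowrite64() publishes each 64-bit queue address to the device as two 32-bit registers; only the value changes in this hunk (a dma() bus address instead of virt_to_phys()). A sketch of the lo/hi split, under the assumption of a flat fake register window — the array offsets are illustrative, not the real common-config layout:

#include <stdint.h>
#include <stdio.h>

static uint32_t common_cfg[8];  /* fake device register window */

/* Write one 64-bit value as two 32-bit halves, as vpm_iowrite64() does. */
static void iowrite64_split(uint64_t value, unsigned int lo, unsigned int hi) {
    common_cfg[lo] = (uint32_t)value;
    common_cfg[hi] = (uint32_t)(value >> 32);
}

int main(void) {
    uint64_t desc_busaddr = 0x100023000ULL;  /* illustrative bus address */
    iowrite64_split(desc_busaddr, 0, 1);     /* queue_desc_lo / queue_desc_hi */
    printf("lo=0x%08x hi=0x%08x\n", common_cfg[0], common_cfg[1]);
    return 0;
}
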
@@ -98,7 +98,7 @@ void vring_add_buf(struct vring_virtqueue *vq,
     for (i = head; out; i = vr->desc[i].next, out--) {
 
         vr->desc[i].flags = VRING_DESC_F_NEXT;
-        vr->desc[i].addr = (u64)virt_to_phys(list->addr);
+        vr->desc[i].addr = list->addr;
         vr->desc[i].len = list->length;
         prev = i;
         list++;
@@ -106,7 +106,7 @@ void vring_add_buf(struct vring_virtqueue *vq,
     for ( ; in; i = vr->desc[i].next, in--) {
 
         vr->desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
-        vr->desc[i].addr = (u64)virt_to_phys(list->addr);
+        vr->desc[i].addr = list->addr;
         vr->desc[i].len = list->length;
         prev = i;
         list++;
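
Because struct vring_list now carries a physaddr_t that the caller has already mapped (see the header change further down), vring_add_buf() copies addresses into descriptors verbatim instead of converting pointers. A stand-alone sketch of that fill, using simplified stand-ins for the real vring types:

#include <stdint.h>
#include <stdio.h>

#define VRING_DESC_F_NEXT 1

typedef uint64_t physaddr_t;

struct vring_desc {
    uint64_t addr;   /* bus address, exactly as the device will see it */
    uint32_t len;
    uint16_t flags;
    uint16_t next;
};

struct vring_list {
    physaddr_t addr; /* pre-mapped via dma()/iob_dma(), not a pointer */
    unsigned int length;
};

int main(void) {
    struct vring_desc desc[2] = { { .next = 1 }, { 0 } };
    struct vring_list list[2] = {
        { .addr = 0x7f000000, .length = 12 },    /* e.g. the dummy header */
        { .addr = 0x7f001000, .length = 1514 },  /* e.g. the packet buffer */
    };

    /* Mirror of the "out" loop: copy each entry straight into a descriptor */
    for (unsigned int i = 0; i < 2; i++) {
        desc[i].flags = VRING_DESC_F_NEXT;
        desc[i].addr = list[i].addr;  /* no virt_to_phys() conversion */
        desc[i].len = list[i].length;
    }
    printf("desc[1].addr=0x%llx len=%u\n",
           (unsigned long long)desc[1].addr, desc[1].len);
    return 0;
}
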
@@ -29,6 +29,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
 #include <ipxe/iobuf.h>
 #include <ipxe/netdevice.h>
 #include <ipxe/pci.h>
+#include <ipxe/dma.h>
 #include <ipxe/if_ether.h>
 #include <ipxe/ethernet.h>
 #include <ipxe/virtio-pci.h>
@@ -99,8 +100,9 @@ struct virtnet_nic {
     /** Pending rx packet count */
     unsigned int rx_num_iobufs;
 
-    /** Virtio net dummy packet headers */
-    struct virtio_net_hdr_modern empty_header[QUEUE_NB];
+    /** DMA device */
+    struct dma_device *dma;
+
 };
 
 /** Add an iobuf to a virtqueue
@@ -115,7 +117,7 @@ static void virtnet_enqueue_iob ( struct net_device *netdev,
                                   int vq_idx, struct io_buffer *iobuf ) {
     struct virtnet_nic *virtnet = netdev->priv;
     struct vring_virtqueue *vq = &virtnet->virtqueue[vq_idx];
-    struct virtio_net_hdr_modern *header = &virtnet->empty_header[vq_idx];
+    struct virtio_net_hdr_modern *header = vq->empty_header;
     unsigned int out = ( vq_idx == TX_INDEX ) ? 2 : 0;
     unsigned int in = ( vq_idx == TX_INDEX ) ? 0 : 2;
     size_t header_len = ( virtnet->virtio_version ?
@@ -132,11 +134,11 @@ static void virtnet_enqueue_iob ( struct net_device *netdev,
          * to header->flags for received packets. Work around
          * this by using separate RX and TX headers.
          */
-        .addr = ( char* ) header,
+        .addr = dma ( &vq->map, header ),
         .length = header_len,
     },
     {
-        .addr = ( char* ) iobuf->data,
+        .addr = iob_dma ( iobuf ),
         .length = iob_len ( iobuf ),
     },
 };
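
virtnet_enqueue_iob() always posts a two-entry list — the per-queue dummy header, now carved out of the queue's own DMA block, followed by the packet payload — and only the out/in split distinguishes TX from RX. A small sketch of that selection and list shape; the addresses are illustrative placeholders for what dma() and iob_dma() would return:

#include <stdint.h>
#include <stdio.h>

#define TX_INDEX 1

typedef uint64_t physaddr_t;

struct vring_list {
    physaddr_t addr;
    unsigned int length;
};

int main(void) {
    int vq_idx = TX_INDEX;
    /* For TX the device reads both entries; for RX it writes both. */
    unsigned int out = (vq_idx == TX_INDEX) ? 2 : 0;
    unsigned int in = (vq_idx == TX_INDEX) ? 0 : 2;

    struct vring_list list[] = {
        { .addr = 0x7f000000, .length = 12 },    /* dummy header via dma() */
        { .addr = 0x7f001000, .length = 1514 },  /* payload via iob_dma() */
    };

    printf("vq %d: out=%u in=%u, %zu entries\n",
           vq_idx, out, in, sizeof(list) / sizeof(list[0]));
    return 0;
}
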
@@ -161,7 +163,7 @@ static void virtnet_refill_rx_virtqueue ( struct net_device *netdev ) {
     struct io_buffer *iobuf;
 
     /* Try to allocate a buffer, stop for now if out of memory */
-    iobuf = alloc_iob ( len );
+    iobuf = alloc_rx_iob ( len, virtnet->dma );
     if ( ! iobuf )
         break;
 
@@ -215,7 +217,8 @@ static int virtnet_open_legacy ( struct net_device *netdev ) {
 
     /* Initialize rx/tx virtqueues */
     for ( i = 0; i < QUEUE_NB; i++ ) {
-        if ( vp_find_vq ( ioaddr, i, &virtnet->virtqueue[i] ) == -1 ) {
+        if ( vp_find_vq ( ioaddr, i, &virtnet->virtqueue[i], virtnet->dma,
+                          sizeof ( struct virtio_net_hdr_modern ) ) == -1 ) {
             DBGC ( virtnet, "VIRTIO-NET %p cannot register queue %d\n",
                    virtnet, i );
             virtnet_free_virtqueues ( netdev );
@@ -280,7 +283,8 @@ static int virtnet_open_modern ( struct net_device *netdev ) {
     }
 
     /* Initialize rx/tx virtqueues */
-    if ( vpm_find_vqs ( &virtnet->vdev, QUEUE_NB, virtnet->virtqueue ) ) {
+    if ( vpm_find_vqs ( &virtnet->vdev, QUEUE_NB, virtnet->virtqueue,
+                        virtnet->dma, sizeof ( struct virtio_net_hdr_modern ) ) ) {
         DBGC ( virtnet, "VIRTIO-NET %p cannot register queues\n",
                virtnet );
         virtnet_free_virtqueues ( netdev );
@@ -335,7 +339,7 @@ static void virtnet_close ( struct net_device *netdev ) {
 
     /* Free rx iobufs */
     list_for_each_entry_safe ( iobuf, next_iobuf, &virtnet->rx_iobufs, list ) {
-        free_iob ( iobuf );
+        free_rx_iob ( iobuf );
     }
     INIT_LIST_HEAD ( &virtnet->rx_iobufs );
     virtnet->rx_num_iobufs = 0;
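
The RX path now pairs alloc_rx_iob(), which allocates a buffer with a live DMA mapping, with free_rx_iob(), which unmaps before freeing; mixing in plain free_iob() would leak the mapping. A toy model of that pairing — every struct and function here is a simplified stand-in, not iPXE's real I/O buffer API:

#include <stdio.h>
#include <stdlib.h>

struct fake_mapping { int mapped; };

struct fake_iob {
    void *data;
    struct fake_mapping map;
};

/* Stands in for alloc_rx_iob(): allocate and map in one step. */
static struct fake_iob *fake_alloc_rx_iob(size_t len) {
    struct fake_iob *iob = malloc(sizeof(*iob));
    if (!iob)
        return NULL;
    iob->data = malloc(len);
    iob->map.mapped = 1;  /* models the dma_map() done at alloc time */
    return iob;
}

/* Stands in for free_rx_iob(): unmap first, then free. */
static void fake_free_rx_iob(struct fake_iob *iob) {
    iob->map.mapped = 0;
    free(iob->data);
    free(iob);
}

int main(void) {
    struct fake_iob *iob = fake_alloc_rx_iob(1514);
    if (!iob)
        return 1;
    fake_free_rx_iob(iob);
    puts("rx iobuf mapped and unmapped in matched pairs");
    return 0;
}
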
@@ -478,6 +482,12 @@ static int virtnet_probe_legacy ( struct pci_device *pci ) {
 
     /* Enable PCI bus master and reset NIC */
     adjust_pci_device ( pci );
 
+    /* Configure DMA */
+    virtnet->dma = &pci->dma;
+    dma_set_mask_64bit ( virtnet->dma );
+    netdev->dma = virtnet->dma;
+
     vp_reset ( ioaddr );
 
     /* Load MAC address and MTU */
@@ -586,6 +596,11 @@ static int virtnet_probe_modern ( struct pci_device *pci, int *found_dev ) {
     /* Enable the PCI device */
     adjust_pci_device ( pci );
 
+    /* Configure DMA */
+    virtnet->dma = &pci->dma;
+    dma_set_mask_64bit ( virtnet->dma );
+    netdev->dma = virtnet->dma;
+
     /* Reset the device and set initial status bits */
     vpm_reset ( &virtnet->vdev );
     vpm_add_status ( &virtnet->vdev, VIRTIO_CONFIG_S_ACKNOWLEDGE );
@@ -633,7 +648,6 @@ err_mac_address:
     vpm_reset ( &virtnet->vdev );
     netdev_nullify ( netdev );
     netdev_put ( netdev );
-
     virtio_pci_unmap_capability ( &virtnet->vdev.device );
 err_map_device:
     virtio_pci_unmap_capability ( &virtnet->vdev.isr );
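
Both probe paths now configure DMA in the same order, before any queue or I/O buffer is allocated: borrow the PCI device's DMA engine, widen its address mask to 64 bits, and publish it on the net_device. A trimmed stand-alone model of that wiring; the fake_* types merely stand in for iPXE's pci_device, net_device, and dma_device:

#include <stdint.h>
#include <stdio.h>

struct fake_dma_device { uint64_t mask; };
struct fake_pci_device { struct fake_dma_device dma; };
struct fake_net_device { struct fake_dma_device *dma; };
struct fake_virtnet    { struct fake_dma_device *dma; };

int main(void) {
    struct fake_pci_device pci = { .dma = { .mask = 0xffffffffULL } };
    struct fake_net_device netdev = { 0 };
    struct fake_virtnet virtnet = { 0 };

    /* Configure DMA (mirrors virtnet_probe_legacy/_modern above) */
    virtnet.dma = &pci.dma;
    virtnet.dma->mask = ~0ULL;  /* models dma_set_mask_64bit() */
    netdev.dma = virtnet.dma;

    printf("dma mask now 0x%llx\n", (unsigned long long)netdev.dma->mask);
    return 0;
}
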
@@ -1,6 +1,8 @@
 #ifndef _VIRTIO_PCI_H_
 # define _VIRTIO_PCI_H_
 
+#include <ipxe/dma.h>
+
 /* A 32-bit r/o bitmask of the features supported by the host */
 #define VIRTIO_PCI_HOST_FEATURES 0
 
@@ -198,7 +200,8 @@ struct vring_virtqueue;
 
 void vp_free_vq(struct vring_virtqueue *vq);
 int vp_find_vq(unsigned int ioaddr, int queue_index,
-               struct vring_virtqueue *vq);
+               struct vring_virtqueue *vq, struct dma_device *dma_dev,
+               size_t header_size);
 
 
 /* Virtio 1.0 I/O routines abstract away the three possible HW access
@@ -298,7 +301,8 @@ void vpm_notify(struct virtio_pci_modern_device *vdev,
                 struct vring_virtqueue *vq);
 
 int vpm_find_vqs(struct virtio_pci_modern_device *vdev,
-                 unsigned nvqs, struct vring_virtqueue *vqs);
+                 unsigned nvqs, struct vring_virtqueue *vqs,
+                 struct dma_device *dma_dev, size_t header_size);
 
 int virtio_pci_find_capability(struct pci_device *pci, uint8_t cfg_type);

@@ -2,6 +2,7 @@
 # define _VIRTIO_RING_H_
 
 #include <ipxe/virtio-pci.h>
+#include <ipxe/dma.h>
 
 /* Status byte for guest to report progress, and synchronize features. */
 /* We have seen device and processed generic fields (VIRTIO_CONFIG_F_VIRTIO) */
@@ -74,17 +75,21 @@ struct vring {
 
 struct vring_virtqueue {
     unsigned char *queue;
+    size_t queue_size;
+    struct dma_mapping map;
+    struct dma_device *dma;
     struct vring vring;
     u16 free_head;
     u16 last_used_idx;
     void **vdata;
+    struct virtio_net_hdr_modern *empty_header;
     /* PCI */
    int queue_index;
     struct virtio_pci_region notification;
 };
 
 struct vring_list {
-    char *addr;
+    physaddr_t addr;
     unsigned int length;
 };