mirror of https://github.com/ipxe/ipxe.git
[golan] Bug fixes and improved paging allocation method
Updates:
- revert Support for clear interrupt via BAR

Signed-off-by: Raed Salem <raeds@mellanox.com>
Signed-off-by: Michael Brown <mcb30@ipxe.org>
parent ce240c8c2d
commit 1ff1eebcf7
@@ -89,6 +89,7 @@ SRCDIRS += drivers/infiniband/mlx_utils/mlx_lib/mlx_nvconfig
SRCDIRS += drivers/infiniband/mlx_utils/mlx_lib/mlx_vmac
SRCDIRS += drivers/infiniband/mlx_utils/mlx_lib/mlx_blink_leds
SRCDIRS += drivers/infiniband/mlx_utils/mlx_lib/mlx_link_speed
SRCDIRS += drivers/infiniband/mlx_utils/mlx_lib/mlx_mtu
SRCDIRS += drivers/infiniband/mlx_nodnic/src
SRCDIRS += drivers/usb
SRCDIRS += interface/pxe interface/efi interface/smbios

@@ -44,6 +44,7 @@ FILE_LICENCE ( GPL2_OR_LATER );
#include "mlx_utils/mlx_lib/mlx_nvconfig/mlx_nvconfig_defaults.h"
#include "mlx_utils/include/public/mlx_pci_gw.h"
#include "mlx_utils/mlx_lib/mlx_vmac/mlx_vmac.h"
#include "mlx_utils/mlx_lib/mlx_mtu/mlx_mtu.h"

/***************************************************************************
*

@@ -823,6 +824,7 @@ static void flexboot_nodnic_eth_complete_recv ( struct ib_device *ibdev __unused
netdev_rx_err ( netdev, iobuf, -ENOTTY );
return;
}

netdev_rx ( netdev, iobuf );
}

@@ -907,6 +909,7 @@ static int flexboot_nodnic_eth_open ( struct net_device *netdev ) {
list_del(&port->eth_qp->send.list);
list_add ( &port->eth_qp->send.list, &port->eth_cq->work_queues );
port->eth_qp->recv.cq = port->eth_cq;
port->cmdsn = 0;
list_del(&port->eth_qp->recv.list);
list_add ( &port->eth_qp->recv.list, &port->eth_cq->work_queues );
@@ -1445,12 +1448,6 @@ static int flexboot_nodnic_alloc_uar ( struct flexboot_nodnic *flexboot_nodnic )
struct pci_device *pci = flexboot_nodnic->pci;
nodnic_uar *uar = &flexboot_nodnic->port[0].port_priv.device->uar;

if ( ! flexboot_nodnic->device_priv.utils ) {
uar->virt = NULL;
DBGC ( flexboot_nodnic, "%s: mlx_utils is not initialized \n", __FUNCTION__ );
return -EINVAL;
}

if ( ! flexboot_nodnic->device_priv.device_cap.support_uar_tx_db ) {
DBGC ( flexboot_nodnic, "%s: tx db using uar is not supported \n", __FUNCTION__ );
return -ENOTSUP;

@@ -1467,6 +1464,18 @@ static int flexboot_nodnic_alloc_uar ( struct flexboot_nodnic *flexboot_nodnic )
return status;
}

static int flexboot_nodnic_dealloc_uar ( struct flexboot_nodnic *flexboot_nodnic ) {
nodnic_uar *uar = &flexboot_nodnic->port[0].port_priv.device->uar;

if ( uar->virt ) {
iounmap( uar->virt );
uar->virt = NULL;
}

return MLX_SUCCESS;
}


int flexboot_nodnic_probe ( struct pci_device *pci,
struct flexboot_nodnic_callbacks *callbacks,
void *drv_priv __unused ) {

@@ -1508,6 +1517,10 @@ int flexboot_nodnic_probe ( struct pci_device *pci,
MLX_FATAL_CHECK_STATUS(status, get_cap_err,
"nodnic_device_get_cap failed");

if ( mlx_set_admin_mtu ( device_priv->utils, 1, EN_DEFAULT_ADMIN_MTU ) ) {
MLX_DEBUG_ERROR( device_priv->utils, "Failed to set admin mtu\n" );
}

status = flexboot_nodnic_set_port_masking ( flexboot_nodnic_priv );
MLX_FATAL_CHECK_STATUS(status, err_set_masking,
"flexboot_nodnic_set_port_masking failed");

@@ -1522,7 +1535,7 @@ int flexboot_nodnic_probe ( struct pci_device *pci,
"flexboot_nodnic_thin_init_ports failed");

if ( ( status = flexboot_nodnic_alloc_uar ( flexboot_nodnic_priv ) ) ) {
DBGC(flexboot_nodnic_priv, "%s: flexboot_nodnic_pci_init failed"
DBGC(flexboot_nodnic_priv, "%s: flexboot_nodnic_alloc_uar failed"
" ( status = %d )\n",__FUNCTION__, status );
}

@@ -1550,6 +1563,7 @@ int flexboot_nodnic_probe ( struct pci_device *pci,
flexboot_nodnic_ports_unregister_dev ( flexboot_nodnic_priv );
reg_err:
err_set_ports_types:
flexboot_nodnic_dealloc_uar ( flexboot_nodnic_priv );
err_thin_init_ports:
err_alloc_ibdev:
err_set_masking:

@@ -1568,6 +1582,7 @@ void flexboot_nodnic_remove ( struct pci_device *pci )
struct flexboot_nodnic *flexboot_nodnic_priv = pci_get_drvdata ( pci );
nodnic_device_priv *device_priv = & ( flexboot_nodnic_priv->device_priv );

flexboot_nodnic_dealloc_uar ( flexboot_nodnic_priv );
flexboot_nodnic_ports_unregister_dev ( flexboot_nodnic_priv );
nodnic_device_teardown( device_priv );
free_mlx_utils ( & device_priv->utils );

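The flexboot_nodnic_dealloc_uar() helper added above is written so it is safe from both the probe error-unwind labels and the remove path: it only unmaps when a mapping is actually held, and it clears the pointer so a second call becomes a no-op. A minimal standalone model of that idempotent-release pattern (illustrative names, plain free() standing in for the driver's iounmap()):

#include <stdlib.h>

struct example_uar {
	void *virt;	/* NULL whenever no mapping is held */
};

/* Safe to call from both the error-unwind path and the remove path. */
static void example_dealloc_uar ( struct example_uar *uar ) {
	if ( uar->virt ) {
		free ( uar->virt );	/* the driver releases a BAR mapping instead */
		uar->virt = NULL;	/* a repeated call is now a no-op */
	}
}
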
@@ -42,6 +42,7 @@ FILE_LICENCE ( GPL2_OR_LATER );
#define FLEXBOOT_NODNIC_PAGE_SHIFT 12
#define FLEXBOOT_NODNIC_PAGE_SIZE (1 << FLEXBOOT_NODNIC_PAGE_SHIFT)
#define FLEXBOOT_NODNIC_PAGE_MASK (FLEXBOOT_NODNIC_PAGE_SIZE - 1)
#define EN_DEFAULT_ADMIN_MTU 1522

/* Port protocol */
enum flexboot_nodnic_protocol {

@@ -42,80 +42,47 @@ FILE_LICENCE ( GPL2_OR_LATER );
#include "mlx_utils/include/public/mlx_bail.h"
#include "mlx_utils/mlx_lib/mlx_link_speed/mlx_link_speed.h"


#define DEVICE_IS_CIB( device ) ( device == 0x1011 )

/******************************************************************************/
/************* Very simple memory management for umalloced pages **************/
/******* Temporary solution until full memory management is implemented *******/
/******************************************************************************/

struct golan_page {
struct list_head list;
userptr_t addr;
};

static void golan_free_pages ( struct list_head *head ) {
struct golan_page *page, *tmp;
list_for_each_entry_safe ( page, tmp, head, list ) {
list_del ( &page->list );
ufree ( page->addr );
free ( page );
static void golan_free_fw_areas ( struct golan *golan ) {
int i;

for (i = 0; i < GOLAN_FW_AREAS_NUM; i++) {
if ( golan->fw_areas[i].area ) {
ufree ( golan->fw_areas[i].area );
golan->fw_areas[i].area = UNULL;
}
}
}

static int golan_init_pages ( struct list_head *head ) {
int rc = 0;
static int golan_init_fw_areas ( struct golan *golan ) {
int rc = 0, i = 0;

if ( !head ) {
if ( ! golan ) {
rc = -EINVAL;
goto err_golan_init_pages_bad_param;
goto err_golan_init_fw_areas_bad_param;
}

INIT_LIST_HEAD ( head );
for (i = 0; i < GOLAN_FW_AREAS_NUM; i++)
golan->fw_areas[i].area = UNULL;

return rc;

err_golan_init_pages_bad_param:
err_golan_init_fw_areas_bad_param:
return rc;
}

static userptr_t golan_get_page ( struct list_head *head ) {
struct golan_page *page;
userptr_t addr;

if ( list_empty ( head ) ) {
addr = umalloc ( GOLAN_PAGE_SIZE );
if ( addr == UNULL ) {
goto err_golan_iget_page_alloc_page;
}
} else {
page = list_first_entry ( head, struct golan_page, list );
list_del ( &page->list );
addr = page->addr;
free ( page );
}
err_golan_iget_page_alloc_page:
return addr;
}

static int golan_return_page ( struct list_head *head,
userptr_t addr ) {
struct golan_page *new_entry;
int rc = 0;

if ( ! head ) {
rc = -EINVAL;
goto err_golan_return_page_bad_param;
}
new_entry = zalloc ( sizeof ( *new_entry ) );
if ( new_entry == NULL ) {
rc = -ENOMEM;
goto err_golan_return_page_alloc_page;
}
new_entry->addr = addr;
list_add_tail( &new_entry->list, head );

err_golan_return_page_alloc_page:
err_golan_return_page_bad_param:
return rc;
}
/******************************************************************************/

const char *golan_qp_state_as_string[] = {
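The hunk above replaces the ad-hoc free list of umalloc'ed pages with a small fixed set of firmware areas that are allocated on first use and released only at final teardown. A standalone sketch of the idea, with simplified types and illustrative names rather than the driver's own API:

#include <stdint.h>
#include <stdlib.h>

#define EXAMPLE_PAGE_SIZE	4096
#define EXAMPLE_FW_AREAS_NUM	2

/* One contiguous block backs all pages of a given firmware request. */
struct example_fw_area {
	uint32_t npages;	/* length of the area in pages */
	void *area;		/* allocated on first use, freed only at teardown */
};

/* Allocate the backing block once; later requests reuse the same area. */
static void * example_get_area ( struct example_fw_area *fw, uint32_t pages ) {
	if ( ! fw->area ) {
		fw->area = malloc ( ( size_t ) pages * EXAMPLE_PAGE_SIZE );
		if ( ! fw->area )
			return NULL;
		fw->npages = pages;
	}
	return fw->area;
}

/* Freed only on final teardown, so the memory map never changes at runtime. */
static void example_free_areas ( struct example_fw_area *areas ) {
	int i;
	for ( i = 0; i < EXAMPLE_FW_AREAS_NUM; i++ ) {
		free ( areas[i].area );
		areas[i].area = NULL;
	}
}
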
@@ -177,16 +144,6 @@ static inline u8 xor8_buf(void *buf, int len)
return sum;
}

static inline int verify_block_sig(struct golan_cmd_prot_block *block)
{
if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
return -EINVAL;

if (xor8_buf(block, sizeof(*block)) != 0xff)
return -EINVAL;
return 0;
}

static inline const char *cmd_status_str(u8 status)
{
switch (status) {

@@ -258,24 +215,6 @@ static inline void golan_calc_sig(struct golan *golan, uint32_t cmd_idx,
cmd->sig = ~xor8_buf(cmd, sizeof(*cmd));
}

/**
* Get Golan FW
*/
static int fw_ver_and_cmdif ( struct golan *golan ) {
DBGC (golan ,"\n[%x:%x]rev maj.min.submin = %x.%x.%x cmdif = %x\n",
golan->iseg->fw_rev,
golan->iseg->cmdif_rev_fw_sub,
fw_rev_maj ( golan ), fw_rev_min ( golan ),
fw_rev_sub ( golan ), cmdif_rev ( golan));

if (cmdif_rev ( golan) != PXE_CMDIF_REF) {
DBGC (golan ,"CMDIF %d not supported current is %d\n",
cmdif_rev ( golan ), PXE_CMDIF_REF);
return 1;
}
return 0;
}

static inline void show_out_status(uint32_t *out)
{
DBG("%x\n", be32_to_cpu(out[0]));
@@ -466,10 +405,8 @@ static inline int golan_take_pages ( struct golan *golan, uint32_t pages, __be16

while ( pages > 0 ) {
uint32_t pas_num = min(pages, MAX_PASE_MBOX);
unsigned i;
struct golan_cmd_layout *cmd;
struct golan_manage_pages_inbox *in;
struct golan_manage_pages_outbox_data *out;

size_ibox = sizeof(struct golan_manage_pages_inbox) + (pas_num * GOLAN_PAS_SIZE);
size_obox = sizeof(struct golan_manage_pages_outbox) + (pas_num * GOLAN_PAS_SIZE);

@@ -485,11 +422,7 @@ static inline int golan_take_pages ( struct golan *golan, uint32_t pages, __be16
in->num_entries = cpu_to_be32(pas_num);

if ( ( rc = send_command_and_wait(golan, MEM_CMD_IDX, MEM_MBOX, MEM_MBOX, __FUNCTION__) ) == 0 ) {
out = (struct golan_manage_pages_outbox_data *)GET_OUTBOX(golan, MEM_MBOX);
out_num_entries = be32_to_cpu(((struct golan_manage_pages_outbox *)(cmd->out))->num_entries);
for (i = 0; i < out_num_entries; ++i) {
golan_return_page ( &golan->pages, ( BE64_BUS_2_USR( out->pas[i] ) ) );
}
} else {
if ( rc == -EBUSY ) {
DBGC (golan ,"HCA is busy (rc = -EBUSY)\n" );

@@ -506,17 +439,29 @@ static inline int golan_take_pages ( struct golan *golan, uint32_t pages, __be16
pages -= out_num_entries;
}
DBGC( golan , "%s Pages handled\n", __FUNCTION__);
return 0;
return rc;
}

static inline int golan_provide_pages ( struct golan *golan , uint32_t pages, __be16 func_id ) {
static inline int golan_provide_pages ( struct golan *golan , uint32_t pages
, __be16 func_id,struct golan_firmware_area *fw_area) {
struct mbox *mailbox;
int size_ibox = 0;
int size_obox = 0;
int rc = 0;
userptr_t next_page_addr = UNULL;

DBGC(golan, "%s\n", __FUNCTION__);

if ( ! fw_area->area ) {
fw_area->area = umalloc ( GOLAN_PAGE_SIZE * pages );
if ( fw_area->area == UNULL ) {
rc = -ENOMEM;
DBGC (golan ,"Failed to allocated %d pages \n",pages);
goto err_golan_alloc_fw_area;
}
fw_area->npages = pages;
}
assert ( fw_area->npages == pages );
next_page_addr = fw_area->area;
while ( pages > 0 ) {
uint32_t pas_num = min(pages, MAX_PASE_MBOX);
unsigned i, j;

@@ -538,12 +483,9 @@ static inline int golan_provide_pages ( struct golan *golan , uint32_t pages, __
in->func_id = func_id; /* Already BE */
in->num_entries = cpu_to_be32(pas_num);

for ( i = 0 , j = MANAGE_PAGES_PSA_OFFSET; i < pas_num; ++i ,++j ) {
if ( ! ( addr = golan_get_page ( & golan->pages ) ) ) {
rc = -ENOMEM;
DBGC (golan ,"Couldnt allocated page \n");
goto malloc_dma_failed;
}
for ( i = 0 , j = MANAGE_PAGES_PSA_OFFSET; i < pas_num; ++i ,++j,
next_page_addr += GOLAN_PAGE_SIZE ) {
addr = next_page_addr;
if (GOLAN_PAGE_MASK & user_to_phys(addr, 0)) {
DBGC (golan ,"Addr not Page alligned [%lx %lx]\n", user_to_phys(addr, 0), addr);
}

@@ -563,7 +505,6 @@ static inline int golan_provide_pages ( struct golan *golan , uint32_t pages, __
get_cmd( golan , MEM_CMD_IDX )->status_own,
be32_to_cpu(CMD_SYND(golan, MEM_CMD_IDX)), pas_num);
}
golan_return_page ( &golan->pages ,addr );
goto err_send_command;
}
}

@@ -571,7 +512,7 @@ static inline int golan_provide_pages ( struct golan *golan , uint32_t pages, __
return 0;

err_send_command:
malloc_dma_failed:
err_golan_alloc_fw_area:
/* Go over In box and free pages */
/* Send Error to FW */
/* What is next - Disable HCA? */
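In the reworked golan_provide_pages() above, page addresses are no longer pulled from a pool one at a time; they are carved out of the preallocated area at GOLAN_PAGE_SIZE strides and handed to the firmware in mailbox-sized batches, with a debug warning if an address is not page aligned. A standalone model of that batching and alignment check (the batch size here is a stand-in; the driver's real limit is MAX_PASE_MBOX):

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE	4096
#define EXAMPLE_PAGE_MASK	( EXAMPLE_PAGE_SIZE - 1 )
#define EXAMPLE_MAX_PER_MBOX	16	/* illustrative; the driver uses MAX_PASE_MBOX */

static void example_provide_pages ( uintptr_t area, uint32_t pages ) {
	uintptr_t next = area;

	while ( pages > 0 ) {
		uint32_t batch = ( pages < EXAMPLE_MAX_PER_MBOX ?
				   pages : EXAMPLE_MAX_PER_MBOX );
		uint32_t i;

		for ( i = 0 ; i < batch ; i++, next += EXAMPLE_PAGE_SIZE ) {
			if ( next & EXAMPLE_PAGE_MASK )
				printf ( "address %#lx is not page aligned\n",
					 ( unsigned long ) next );
			/* ...fill one physical-address entry of the inbox... */
		}
		/* ...issue one MANAGE_PAGES command per batch... */
		pages -= batch;
	}
}
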
@@ -609,7 +550,7 @@ static inline int golan_handle_pages(struct golan *golan,
total_pages = (( pages >= 0 ) ? pages : ( pages * ( -1 ) ));

if ( mode == GOLAN_PAGES_GIVE ) {
rc = golan_provide_pages(golan, total_pages, func_id);
rc = golan_provide_pages(golan, total_pages, func_id, & ( golan->fw_areas[qry-1] ));
} else {
rc = golan_take_pages(golan, golan->total_dma_pages, func_id);
golan->total_dma_pages = 0;

@@ -799,16 +740,14 @@ static int golan_create_eq(struct golan *golan)
struct golan_cmd_layout *cmd;
struct golan_create_eq_mbox_out *out;
int rc, i;
userptr_t addr;

eq->cons_index = 0;
eq->size = GOLAN_NUM_EQES * sizeof(eq->eqes[0]);
addr = golan_get_page ( &golan->pages );
if (!addr) {
eq->eqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
if (!eq->eqes) {
rc = -ENOMEM;
goto err_create_eq_eqe_alloc;
}
eq->eqes = (struct golan_eqe *)user_to_virt(addr, 0);

/* Set EQEs ownership bit to HW ownership */
for (i = 0; i < GOLAN_NUM_EQES; ++i) {

@@ -823,7 +762,7 @@ static int golan_create_eq(struct golan *golan)
in = (struct golan_create_eq_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);

/* Fill the physical address of the page */
in->pas[0] = USR_2_BE64_BUS(addr);
in->pas[0] = VIRT_2_BE64_BUS( eq->eqes );
in->ctx.log_sz_usr_page = cpu_to_be32((ilog2(GOLAN_NUM_EQES)) << 24 | golan->uar.index);
DBGC( golan , "UAR idx %x (BE %x)\n", golan->uar.index, in->ctx.log_sz_usr_page);
in->events_mask = cpu_to_be64(1 << GOLAN_EVENT_TYPE_PORT_CHANGE);

@@ -842,7 +781,7 @@ static int golan_create_eq(struct golan *golan)
return 0;

err_create_eq_cmd:
golan_return_page ( & golan->pages, virt_to_user ( eq->eqes ) );
free_dma ( eq->eqes , GOLAN_PAGE_SIZE );
err_create_eq_eqe_alloc:
DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
return rc;

@@ -867,7 +806,7 @@ static void golan_destory_eq(struct golan *golan)
rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
GOLAN_PRINT_RC_AND_CMD_STATUS;

golan_return_page ( &golan->pages, virt_to_user ( golan->eq.eqes ) );
free_dma ( golan->eq.eqes , GOLAN_PAGE_SIZE );
golan->eq.eqn = 0;

DBGC( golan, "%s Event queue (0x%x) was destroyed\n", __FUNCTION__, eqn);
@@ -1016,7 +955,6 @@ static int golan_create_cq(struct ib_device *ibdev,
struct golan_create_cq_mbox_out *out;
int rc;
unsigned int i;
userptr_t addr;

golan_cq = zalloc(sizeof(*golan_cq));
if (!golan_cq) {

@@ -1031,12 +969,11 @@ static int golan_create_cq(struct ib_device *ibdev,
goto err_create_cq_db_alloc;
}

addr = golan_get_page ( &golan->pages );
if (!addr) {
golan_cq->cqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
if (!golan_cq->cqes) {
rc = -ENOMEM;
goto err_create_cq_cqe_alloc;
}
golan_cq->cqes = (struct golan_cqe64 *)user_to_virt(addr, 0);

/* Set CQEs ownership bit to HW ownership */
for (i = 0; i < cq->num_cqes; ++i) {

@@ -1053,7 +990,7 @@ static int golan_create_cq(struct ib_device *ibdev,
in = (struct golan_create_cq_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);

/* Fill the physical address of the page */
in->pas[0] = USR_2_BE64_BUS(addr);
in->pas[0] = VIRT_2_BE64_BUS( golan_cq->cqes );
in->ctx.cqe_sz_flags = GOLAN_CQE_SIZE_64 << 5;
in->ctx.log_sz_usr_page = cpu_to_be32(((ilog2(cq->num_cqes)) << 24) | golan->uar.index);
in->ctx.c_eqn = cpu_to_be16(golan->eq.eqn);

@@ -1071,7 +1008,7 @@ static int golan_create_cq(struct ib_device *ibdev,
return 0;

err_create_cq_cmd:
golan_return_page ( & golan->pages, virt_to_user ( golan_cq->cqes ) );
free_dma( golan_cq->cqes , GOLAN_PAGE_SIZE );
err_create_cq_cqe_alloc:
free_dma(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE);
err_create_cq_db_alloc:

@@ -1108,7 +1045,7 @@ static void golan_destroy_cq(struct ib_device *ibdev,
cq->cqn = 0;

ib_cq_set_drvdata(cq, NULL);
golan_return_page ( & golan->pages, virt_to_user ( golan_cq->cqes ) );
free_dma ( golan_cq->cqes , GOLAN_PAGE_SIZE );
free_dma(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE);
free(golan_cq);

@@ -1154,7 +1091,6 @@ static int golan_create_qp_aux(struct ib_device *ibdev,
struct golan_cmd_layout *cmd;
struct golan_wqe_data_seg *data;
struct golan_create_qp_mbox_out *out;
userptr_t addr;
uint32_t wqe_size_in_bytes;
uint32_t max_qp_size_in_wqes;
unsigned int i;

@@ -1202,12 +1138,11 @@ static int golan_create_qp_aux(struct ib_device *ibdev,
golan_qp->size = golan_qp->sq.size + golan_qp->rq.size;

/* allocate dma memory for WQEs (1 page is enough) - should change it */
addr = golan_get_page ( &golan->pages );
if (!addr) {
golan_qp->wqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
if (!golan_qp->wqes) {
rc = -ENOMEM;
goto err_create_qp_wqe_alloc;
}
golan_qp->wqes = user_to_virt(addr, 0);
golan_qp->rq.wqes = golan_qp->wqes;
golan_qp->sq.wqes = golan_qp->wqes + golan_qp->rq.size;//(union golan_send_wqe *)&
//(((struct golan_recv_wqe_ud *)(golan_qp->wqes))[qp->recv.num_wqes]);

@@ -1241,7 +1176,7 @@ static int golan_create_qp_aux(struct ib_device *ibdev,
in = (struct golan_create_qp_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);

/* Fill the physical address of the page */
in->pas[0] = USR_2_BE64_BUS(addr);
in->pas[0] = VIRT_2_BE64_BUS(golan_qp->wqes);
in->ctx.qp_counter_set_usr_page = cpu_to_be32(golan->uar.index);

in->ctx.flags_pd = cpu_to_be32(golan->pdn);

@@ -1280,7 +1215,7 @@ static int golan_create_qp_aux(struct ib_device *ibdev,
err_create_qp_cmd:
free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
err_create_qp_db_alloc:
golan_return_page ( & golan->pages, ( userptr_t ) golan_qp->wqes );
free_dma ( golan_qp->wqes, GOLAN_PAGE_SIZE );
err_create_qp_wqe_alloc:
err_create_qp_sq_size:
err_create_qp_sq_wqe_size:

@@ -1488,7 +1423,7 @@ static void golan_destroy_qp(struct ib_device *ibdev,

ib_qp_set_drvdata(qp, NULL);
free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
golan_return_page ( & golan->pages, ( userptr_t ) golan_qp->wqes );
free_dma ( golan_qp->wqes, GOLAN_PAGE_SIZE );
free(golan_qp);

DBGC( golan ,"%s QP 0x%lx was destroyed\n", __FUNCTION__, qpn);
@@ -1526,7 +1461,6 @@ static int golan_post_send(struct ib_device *ibdev,
unsigned long wqe_idx;
struct golan_wqe_data_seg *data = NULL;
struct golan_wqe_ctrl_seg *ctrl = NULL;
// static uint8_t toggle = 0;


wqe_idx_mask = (qp->send.num_wqes - 1);

@@ -1576,8 +1510,9 @@ static int golan_post_send(struct ib_device *ibdev,
golan_qp->sq.next_idx = (golan_qp->sq.next_idx + GOLAN_WQEBBS_PER_SEND_UD_WQE);
golan_qp->doorbell_record->send_db = cpu_to_be16(golan_qp->sq.next_idx);
wmb();
writeq(*((__be64 *)ctrl), golan->uar.virt + 0x800);// +
// ((toggle++ & 0x1) ? 0x100 : 0x0));
writeq(*((__be64 *)ctrl), golan->uar.virt
+ ( ( golan_qp->sq.next_idx & 0x1 ) ? DB_BUFFER0_EVEN_OFFSET
: DB_BUFFER0_ODD_OFFSET ) );
return 0;
}

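The doorbell write in golan_post_send() now alternates between two doorbell buffers in the UAR instead of always hitting offset 0x800, keyed on the low bit of the send-queue index (the matching DB_BUFFER0_EVEN_OFFSET/DB_BUFFER0_ODD_OFFSET definitions are added in golan.h further below). A standalone sketch of the offset selection; which parity maps to which buffer depends on whether the index has already been advanced, as in the two call sites touched by this commit:

#include <stdint.h>

#define EXAMPLE_DB_BUFFER0_EVEN_OFFSET	0x800
#define EXAMPLE_DB_BUFFER0_ODD_OFFSET	0x900

/* Select the doorbell buffer from the low bit of the (already advanced) index. */
static uint32_t example_db_offset ( uint32_t sq_next_idx ) {
	return ( ( sq_next_idx & 0x1 ) ? EXAMPLE_DB_BUFFER0_EVEN_OFFSET
				       : EXAMPLE_DB_BUFFER0_ODD_OFFSET );
}
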
@@ -1702,7 +1637,6 @@ err_query_vport_gid_cmd:
static int golan_query_vport_pkey ( struct ib_device *ibdev ) {
struct golan *golan = ib_get_drvdata ( ibdev );
struct golan_cmd_layout *cmd;
//struct golan_query_hca_vport_pkey_data *pkey_table;
struct golan_query_hca_vport_pkey_inbox *in;
int pkey_table_size_in_entries = (1 << (7 + golan->caps.pkey_table_size));
int rc;

@@ -1719,8 +1653,6 @@ static int golan_query_vport_pkey ( struct ib_device *ibdev ) {
rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, GEN_MBOX, __FUNCTION__ );
GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_vport_pkey_cmd );

//pkey_table = (struct golan_query_hca_vport_pkey_data *)( GET_OUTBOX ( golan, GEN_MBOX ) );

return 0;
err_query_vport_pkey_cmd:
DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
@@ -2100,10 +2032,15 @@ static void golan_poll_eq(struct ib_device *ibdev)
cqn, eqe->data.cq_err.syndrome);
// mlx5_cq_event(dev, cqn, eqe->type);
break;
/*
* currently the driver do not support dynamic memory request
* during FW run, a follow up change will allocate FW pages once and
* never release them till driver shutdown, this change will not support
* this request as currently this request is not issued anyway.
case GOLAN_EVENT_TYPE_PAGE_REQUEST:
{
/* we should check if we get this event while we
* waiting for a command */
// we should check if we get this event while we
// waiting for a command
u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages);

@@ -2112,6 +2049,7 @@ static void golan_poll_eq(struct ib_device *ibdev)
golan_provide_pages(golan, npages, func_id);
}
break;
*/
default:
DBGC (golan ,"%s Unhandled event 0x%x on EQ 0x%x\n", __FUNCTION__,
eqe->type, eq->eqn);
@@ -2231,7 +2169,6 @@ static int golan_register_ibdev(struct golan_port *port)

static inline void golan_bring_down(struct golan *golan)
{

DBGC(golan, "%s: start\n", __FUNCTION__);

if (~golan->flags & GOLAN_OPEN) {

@@ -2413,7 +2350,8 @@ static int golan_probe_normal ( struct pci_device *pci ) {
goto err_golan_alloc;
}

if ( golan_init_pages( &golan->pages ) ) {
/* at POST stage some BIOSes have limited available dynamic memory */
if ( golan_init_fw_areas ( golan ) ) {
rc = -ENOMEM;
goto err_golan_golan_init_pages;
}

@@ -2423,11 +2361,6 @@ static int golan_probe_normal ( struct pci_device *pci ) {
golan->pci = pci;
golan_pci_init( golan );
/* config command queues */
if ( fw_ver_and_cmdif( golan ) ) {
rc = -1;
goto err_fw_ver_cmdif;
}

if ( golan_bring_up( golan ) ) {
DBGC (golan ,"golan bringup failed\n");
rc = -1;

@@ -2482,9 +2415,8 @@ err_golan_probe_alloc_ibdev:
err_utils_init:
golan_bring_down ( golan );
err_golan_bringup:
err_fw_ver_cmdif:
iounmap( golan->iseg );
golan_free_pages( &golan->pages );
golan_free_fw_areas ( golan );
err_golan_golan_init_pages:
free ( golan );
err_golan_alloc:

@@ -2513,7 +2445,7 @@ static void golan_remove_normal ( struct pci_device *pci ) {
free_mlx_utils ( & golan->utils );
}
iounmap( golan->iseg );
golan_free_pages( &golan->pages );
golan_free_fw_areas ( golan );
free(golan);
}

@@ -2528,14 +2460,16 @@ static mlx_status shomron_tx_uar_send_db ( struct ib_device *ibdev,
( struct shomron_nodnic_eth_send_wqe * )wqbb;
struct shomronprm_wqe_segment_ctrl_send *ctrl;

if ( ! ibdev || ! eth_wqe || ! flexboot_nodnic->device_priv.uar.virt ) {
if ( ! eth_wqe || ! flexboot_nodnic->device_priv.uar.virt ) {
DBG("%s: Invalid parameters\n",__FUNCTION__);
status = MLX_FAILED;
goto err;
}
wmb();
ctrl = & eth_wqe->ctrl;
writeq(*((__be64 *)ctrl), flexboot_nodnic->device_priv.uar.virt + 0x800);
writeq(*((__be64 *)ctrl), flexboot_nodnic->device_priv.uar.virt +
( ( MLX_GET ( ctrl, wqe_index ) & 0x1 ) ? DB_BUFFER0_ODD_OFFSET
: DB_BUFFER0_EVEN_OFFSET ) );
err:
return status;
}

@@ -111,6 +111,18 @@ struct golan_uar {
unsigned long phys;
};


struct golan_firmware_area {
/* length of area in pages */
uint32_t npages;
/** Firmware area in external memory
*
* This is allocated when first needed, and freed only on
* final teardown, in order to avoid memory map changes at
* runtime.
*/
userptr_t area;
};
/* Queue Pair */
#define GOLAN_SEND_WQE_BB_SIZE 64
#define GOLAN_SEND_UD_WQE_SIZE sizeof(struct golan_send_wqe_ud)

@@ -204,6 +216,8 @@ struct golan_completion_queue {
#define GOLAN_EQE_SIZE sizeof(struct golan_eqe)
#define GOLAN_NUM_EQES 8
#define GOLAN_EQ_DOORBELL_OFFSET 0x40
#define DB_BUFFER0_EVEN_OFFSET 0x800
#define DB_BUFFER0_ODD_OFFSET 0x900

#define GOLAN_EQ_MAP_ALL_EVENTS \
((1 << GOLAN_EVENT_TYPE_PATH_MIG )| \

@@ -323,6 +337,8 @@ struct golan {
mlx_utils *utils;

struct golan_port ports[GOLAN_MAX_PORTS];
#define GOLAN_FW_AREAS_NUM 2
struct golan_firmware_area fw_areas[GOLAN_FW_AREAS_NUM];
};

#endif /* _GOLAN_H_*/

@@ -169,13 +169,7 @@ nodnic_device_clear_int (
mlx_status status = MLX_SUCCESS;
mlx_uint32 disable = 1;
#ifndef DEVICE_CX3
#define NODNIC_CLEAR_INT_BAR_OFFSET 0x100C
if ( device_priv->device_cap.support_bar_cq_ctrl ) {
status = mlx_pci_mem_write ( device_priv->utils, MlxPciWidthUint32, 0,
( mlx_uint64 ) ( NODNIC_CLEAR_INT_BAR_OFFSET ), 1, &disable );
} else {
status = nodnic_cmd_write(device_priv, NODNIC_NIC_DISABLE_INT_OFFSET, disable);
}
MLX_CHECK_STATUS(device_priv, status, clear_int_done, "failed writing to disable_bit");
#else
mlx_utils *utils = device_priv->utils;

@@ -20,8 +20,8 @@
FILE_LICENCE ( GPL2_OR_LATER );

#include "mlx_mtu.h"
#include "mlx_memory.h"
#include "mlx_bail.h"
#include "../../include/public/mlx_memory.h"
#include "../../include/public/mlx_bail.h"

mlx_status
mlx_get_max_mtu(

@@ -58,3 +58,37 @@ reg_err:
bad_param:
return status;
}

mlx_status
mlx_set_admin_mtu(
IN mlx_utils *utils,
IN mlx_uint8 port_num,
IN mlx_uint32 admin_mtu
)
{
mlx_status status = MLX_SUCCESS;
struct mlx_mtu mtu;
mlx_uint32 reg_status;

if (utils == NULL) {
status = MLX_INVALID_PARAMETER;
goto bad_param;
}

mlx_memory_set(utils, &mtu, 0, sizeof(mtu));

mtu.local_port = port_num;
mtu.admin_mtu = admin_mtu;

status = mlx_reg_access(utils, REG_ID_PMTU, REG_ACCESS_WRITE, &mtu,
sizeof(mtu), &reg_status);
MLX_CHECK_STATUS(utils, status, reg_err, "mlx_reg_access failed ");
if (reg_status != 0) {
MLX_DEBUG_ERROR(utils,"mlx_reg_access failed with status = %d\n", reg_status);
status = MLX_FAILED;
goto reg_err;
}
reg_err:
bad_param:
return status;
}

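The new mlx_set_admin_mtu() writes the administrative MTU through the PMTU access register; the flexboot_nodnic probe path earlier in this commit calls it with EN_DEFAULT_ADMIN_MTU (1522, i.e. a 1500-byte payload plus Ethernet framing overhead). A minimal usage sketch, assuming an initialized mlx_utils handle and the mlx_mtu.h header from this tree; it mirrors the probe-path call and treats failure as non-fatal:

/* Assumes mlx_mtu.h from this tree; 'utils' must already be initialized. */
static void example_set_default_mtu ( mlx_utils *utils ) {
	if ( mlx_set_admin_mtu ( utils, 1, 1522 ) != MLX_SUCCESS ) {
		MLX_DEBUG_ERROR ( utils, "Failed to set admin mtu\n" );
	}
}
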
@@ -22,8 +22,8 @@

FILE_LICENCE ( GPL2_OR_LATER );

#include "mlx_reg_access.h"
#include "mlx_utils.h"
#include "../../include/public/mlx_utils.h"
#include "../../mlx_lib/mlx_reg_access/mlx_reg_access.h"

#define BYTE_TO_BIT 0x8

@@ -49,4 +49,10 @@ mlx_get_max_mtu(
OUT mlx_uint32 *max_mtu
);

mlx_status
mlx_set_admin_mtu(
IN mlx_utils *utils,
IN mlx_uint8 port_num,
IN mlx_uint32 admin_mtu
);
#endif /* MLX_MTU_H_ */

@@ -39,7 +39,6 @@ struct nvconfig_tlv_mapping nvconfig_tlv_mapping[] = {
TlvMappingEntry(0x2001, 0x195, NVRAM_TLV_CLASS_HOST, FALSE),
TlvMappingEntry(0x2010, 0x210, NVRAM_TLV_CLASS_HOST, FALSE),
TlvMappingEntry(0x2011, 0x211, NVRAM_TLV_CLASS_GLOBAL, FALSE),
TlvMappingEntry(0x2020, 0x2020, NVRAM_TLV_CLASS_PHYSICAL_PORT, FALSE),
TlvMappingEntry(0x2021, 0x221, NVRAM_TLV_CLASS_HOST, FALSE),
TlvMappingEntry(0x2023, 0x223, NVRAM_TLV_CLASS_HOST, FALSE),
TlvMappingEntry(0x2006, 0x206, NVRAM_TLV_CLASS_HOST, FALSE),

@@ -67,6 +66,7 @@ struct nvconfig_tlv_mapping nvconfig_tlv_mapping[] = {
TlvMappingEntry(0x110, 0x110, NVRAM_TLV_CLASS_HOST, FALSE),
TlvMappingEntry(0x192, 0x192, NVRAM_TLV_CLASS_GLOBAL, FALSE),
TlvMappingEntry(0x101, 0x101, NVRAM_TLV_CLASS_GLOBAL, TRUE),
TlvMappingEntry(0x194, 0x194, NVRAM_TLV_CLASS_GLOBAL, FALSE),
TlvMappingEntry(0, 0, 0, 0),
};

@@ -239,6 +239,7 @@ nvconfig_nvdata_access(
IN REG_ACCESS_OPT opt,
IN mlx_size data_size,
IN NV_DEFAULT_OPT def_en,
IN NVDA_WRITER_ID writer_id,
IN OUT mlx_uint8 *version,
IN OUT mlx_void *data
)

@@ -263,10 +264,9 @@ nvconfig_nvdata_access(
data_size_align_to_dword = ((data_size + 3) / sizeof(mlx_uint32)) * sizeof(mlx_uint32);
mlx_memory_set(utils, &nvda, 0, sizeof(nvda));
nvda.nv_header.length = data_size_align_to_dword;
nvda.nv_header.rd_en = 0;
nvda.nv_header.def_en = def_en;
nvda.nv_header.over_en = 1;
nvda.nv_header.access_mode = def_en;
nvda.nv_header.version = *version;
nvda.nv_header.writer_id = writer_id;

nvconfig_fill_tlv_type(port, class_code, real_tlv_type, &nvda.nv_header.tlv_type);

@@ -31,6 +31,17 @@ typedef enum {
NVRAM_TLV_CLASS_HOST = 3,
} NVRAM_CLASS_CODE;

typedef enum {
NVDA_NV_HEADER_WRITER_ID_UEFI_HII = 0x6,
NVDA_NV_HEADER_WRITER_ID_FLEXBOOT = 0x8,
} NVDA_WRITER_ID;

typedef enum {
TLV_ACCESS_DEFAULT_DIS = 0,
TLV_ACCESS_CURRENT = 1,
TLV_ACCESS_DEFAULT_EN = 2,
} NV_DEFAULT_OPT;

struct nvconfig_tlv_type_per_port {
mlx_uint32 param_idx :16;
mlx_uint32 port :8;

@@ -78,26 +89,24 @@ struct nvconfig_header {
mlx_uint32 length :9; /*Size of configuration item data in bytes between 0..256 */
mlx_uint32 reserved0 :3;
mlx_uint32 version :4; /* Configuration item version */
mlx_uint32 reserved1 :7;
mlx_uint32 writer_id :5;
mlx_uint32 reserved1 :1;

mlx_uint32 def_en :1; /*Choose whether to access the default value or the user-defined value.
0x0 Read or write the user-defined value.
0x1 Read the default value (only valid for reads).*/
mlx_uint32 access_mode :2; /*Defines which value of the Configuration Item will be accessed.
0x0: NEXT - Next value to be applied
0x1: CURRENT - Currently set values (only valid for Query operation) Supported only if NVGC.nvda_read_current_settings==1.
0x2: FACTORY - Default factory values (only valid for Query operation). Supported only if NVGC.nvda_read_factory_settings==1.*/

mlx_uint32 rd_en :1; /*enables reading the TLV by lower priorities
0 - TLV can be read by the subsequent lifecycle priorities.
1 - TLV cannot be read by the subsequent lifecycle priorities. */
mlx_uint32 over_en :1; /*enables overwriting the TLV by lower priorities
0 - Can only be overwritten by the current lifecycle priority
1 - Allowed to be overwritten by subsequent lifecycle priorities */
mlx_uint32 reserved2 :2;
mlx_uint32 header_type :2;
mlx_uint32 priority :2;
mlx_uint32 reserved3 :2;
mlx_uint32 valid :2;
/* -------------- */
union nvconfig_tlv_type tlv_type;;
/* -------------- */
mlx_uint32 crc :16;
mlx_uint32 reserved :16;

};

#define NVCONFIG_MAX_TLV_SIZE 256

@@ -149,6 +158,7 @@ nvconfig_nvdata_access(
IN REG_ACCESS_OPT opt,
IN mlx_size data_size,
IN NV_DEFAULT_OPT def_en,
IN NVDA_WRITER_ID writer_id,
IN OUT mlx_uint8 *version,
IN OUT mlx_void *data
);

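nvconfig_nvdata_access() now takes a writer_id argument, stored in the new 5-bit writer_id field of the NVDA header so the device can record which agent (e.g. flexboot versus the UEFI HII driver) last wrote a TLV; the read paths in this commit simply pass 0. A hedged sketch of a write call — the leading parameters and their types are inferred from the read calls shown below and may not match the header exactly:

/* Hypothetical TLV write tagging flexboot as the writer; illustrative only. */
static mlx_status example_write_tlv ( mlx_utils *utils, mlx_uint8 port,
				      mlx_uint16 tlv_type, mlx_void *data,
				      mlx_size data_size ) {
	mlx_uint8 version = 0;

	return nvconfig_nvdata_access ( utils, port, tlv_type, REG_ACCESS_WRITE,
			data_size, TLV_ACCESS_DEFAULT_DIS,
			NVDA_NV_HEADER_WRITER_ID_FLEXBOOT, &version, data );
}
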
@@ -386,7 +386,8 @@ nvconfig_nvdata_default_access(
mlx_uint8 version = 0;

status = nvconfig_nvdata_access(utils, port, tlv_type, REG_ACCESS_READ,
data_size, TLV_ACCESS_DEFAULT_EN, &version, data);
data_size, TLV_ACCESS_DEFAULT_EN, 0,
&version, data);
MLX_CHECK_STATUS(NULL, status, nvdata_access_err,
"nvconfig_nvdata_access failed ");
for (index = 0; index * 4 < data_size; index++) {

@@ -493,6 +494,8 @@ nvconfig_read_rom_ini_values(
)
{
mlx_status status = MLX_SUCCESS;
mlx_uint8 version = 0;
mlx_uint32 index;

if (utils == NULL || rom_ini == NULL) {
status = MLX_INVALID_PARAMETER;

@@ -501,8 +504,16 @@ nvconfig_read_rom_ini_values(
}
mlx_memory_set(utils, rom_ini, 0, sizeof(*rom_ini));

status = nvconfig_nvdata_default_access(utils, 0, GLOBAL_ROM_INI_TYPE,
sizeof(*rom_ini), rom_ini);
status = nvconfig_nvdata_access(utils, 0, GLOBAL_ROM_INI_TYPE, REG_ACCESS_READ,
sizeof(*rom_ini), TLV_ACCESS_DEFAULT_DIS, 0,
&version, rom_ini);
MLX_CHECK_STATUS(NULL, status, bad_param,
"nvconfig_nvdata_access failed ");
for (index = 0; index * 4 < sizeof(*rom_ini); index++) {
mlx_memory_be32_to_cpu(utils, (((mlx_uint32 *) rom_ini)[index]),
((mlx_uint32 *) rom_ini) + index);
}

bad_param:
return status;
}

@ -1,145 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) 2015 Mellanox Technologies Ltd.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation; either version 2 of the
|
||||
* License, or any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
||||
* 02110-1301, USA.
|
||||
*/
|
||||
|
||||
FILE_LICENCE ( GPL2_OR_LATER );
|
||||
|
||||
#include "mlx_ocbb.h"
|
||||
#include "mlx_icmd.h"
|
||||
#include "mlx_bail.h"
|
||||
|
||||
mlx_status
|
||||
mlx_ocbb_init (
|
||||
IN mlx_utils *utils,
|
||||
IN mlx_uint64 address
|
||||
)
|
||||
{
|
||||
mlx_status status = MLX_SUCCESS;
|
||||
struct mlx_ocbb_init ocbb_init;
|
||||
ocbb_init.address_hi = (mlx_uint32)(address >> 32);
|
||||
ocbb_init.address_lo = (mlx_uint32)address;
|
||||
|
||||
if (utils == NULL) {
|
||||
status = MLX_INVALID_PARAMETER;
|
||||
goto bad_param;
|
||||
}
|
||||
|
||||
status = mlx_icmd_send_command(
|
||||
utils,
|
||||
OCBB_INIT,
|
||||
&ocbb_init,
|
||||
sizeof(ocbb_init),
|
||||
0
|
||||
);
|
||||
MLX_CHECK_STATUS(utils, status, icmd_err, "mlx_icmd_send_command failed");
|
||||
icmd_err:
|
||||
bad_param:
|
||||
return status;
|
||||
}
|
||||
|
||||
mlx_status
|
||||
mlx_ocbb_query_header_status (
|
||||
IN mlx_utils *utils,
|
||||
OUT mlx_uint8 *ocbb_status
|
||||
)
|
||||
{
|
||||
mlx_status status = MLX_SUCCESS;
|
||||
struct mlx_ocbb_query_status ocbb_query_status;
|
||||
|
||||
if (utils == NULL) {
|
||||
status = MLX_INVALID_PARAMETER;
|
||||
goto bad_param;
|
||||
}
|
||||
|
||||
status = mlx_icmd_send_command(
|
||||
utils,
|
||||
OCBB_QUERY_HEADER_STATUS,
|
||||
&ocbb_query_status,
|
||||
0,
|
||||
sizeof(ocbb_query_status)
|
||||
);
|
||||
MLX_CHECK_STATUS(utils, status, icmd_err, "mlx_icmd_send_command failed");
|
||||
*ocbb_status = ocbb_query_status.status;
|
||||
icmd_err:
|
||||
bad_param:
|
||||
return status;
|
||||
}
|
||||
|
||||
mlx_status
|
||||
mlx_ocbb_query_etoc_status (
|
||||
IN mlx_utils *utils,
|
||||
OUT mlx_uint8 *ocbb_status
|
||||
)
|
||||
{
|
||||
mlx_status status = MLX_SUCCESS;
|
||||
struct mlx_ocbb_query_status ocbb_query_status;
|
||||
|
||||
if (utils == NULL) {
|
||||
status = MLX_INVALID_PARAMETER;
|
||||
goto bad_param;
|
||||
}
|
||||
|
||||
status = mlx_icmd_send_command(
|
||||
utils,
|
||||
OCBB_QUERY_ETOC_STATUS,
|
||||
&ocbb_query_status,
|
||||
0,
|
||||
sizeof(ocbb_query_status)
|
||||
);
|
||||
MLX_CHECK_STATUS(utils, status, icmd_err, "mlx_icmd_send_command failed");
|
||||
*ocbb_status = ocbb_query_status.status;
|
||||
icmd_err:
|
||||
bad_param:
|
||||
return status;
|
||||
}
|
||||
|
||||
mlx_status
|
||||
mlx_ocbb_set_event (
|
||||
IN mlx_utils *utils,
|
||||
IN mlx_uint64 event_data,
|
||||
IN mlx_uint8 event_number,
|
||||
IN mlx_uint8 event_length,
|
||||
IN mlx_uint8 data_length,
|
||||
IN mlx_uint8 data_start_offset
|
||||
)
|
||||
{
|
||||
mlx_status status = MLX_SUCCESS;
|
||||
struct mlx_ocbb_set_event ocbb_event;
|
||||
|
||||
if (utils == NULL) {
|
||||
status = MLX_INVALID_PARAMETER;
|
||||
goto bad_param;
|
||||
}
|
||||
|
||||
ocbb_event.data_length = data_length;
|
||||
ocbb_event.data_start_offset = data_start_offset;
|
||||
ocbb_event.event_number = event_number;
|
||||
ocbb_event.event_data = event_data;
|
||||
ocbb_event.event_length = event_length;
|
||||
status = mlx_icmd_send_command(
|
||||
utils,
|
||||
OCBB_QUERY_SET_EVENT,
|
||||
&ocbb_event,
|
||||
sizeof(ocbb_event),
|
||||
0
|
||||
);
|
||||
MLX_CHECK_STATUS(utils, status, icmd_err, "mlx_icmd_send_command failed");
|
||||
icmd_err:
|
||||
bad_param:
|
||||
return status;
|
||||
}
|
|
@ -1,73 +0,0 @@
|
|||
#ifndef MLX_OCBB_H_
|
||||
#define MLX_OCBB_H_
|
||||
|
||||
/*
|
||||
* Copyright (C) 2015 Mellanox Technologies Ltd.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation; either version 2 of the
|
||||
* License, or any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
||||
* 02110-1301, USA.
|
||||
*/
|
||||
|
||||
FILE_LICENCE ( GPL2_OR_LATER );
|
||||
|
||||
#include "mlx_utils.h"
|
||||
|
||||
#define MLX_OCBB_EVENT_DATA_SIZE 2
|
||||
struct mlx_ocbb_init {
|
||||
mlx_uint32 address_hi;
|
||||
mlx_uint32 address_lo;
|
||||
};
|
||||
|
||||
struct mlx_ocbb_query_status {
|
||||
mlx_uint32 reserved :24;
|
||||
mlx_uint32 status :8;
|
||||
};
|
||||
|
||||
struct mlx_ocbb_set_event {
|
||||
mlx_uint64 event_data;
|
||||
mlx_uint32 event_number :8;
|
||||
mlx_uint32 event_length :8;
|
||||
mlx_uint32 data_length :8;
|
||||
mlx_uint32 data_start_offset :8;
|
||||
};
|
||||
|
||||
mlx_status
|
||||
mlx_ocbb_init (
|
||||
IN mlx_utils *utils,
|
||||
IN mlx_uint64 address
|
||||
);
|
||||
|
||||
mlx_status
|
||||
mlx_ocbb_query_header_status (
|
||||
IN mlx_utils *utils,
|
||||
OUT mlx_uint8 *ocbb_status
|
||||
);
|
||||
|
||||
mlx_status
|
||||
mlx_ocbb_query_etoc_status (
|
||||
IN mlx_utils *utils,
|
||||
OUT mlx_uint8 *ocbb_status
|
||||
);
|
||||
|
||||
mlx_status
|
||||
mlx_ocbb_set_event (
|
||||
IN mlx_utils *utils,
|
||||
IN mlx_uint64 EventData,
|
||||
IN mlx_uint8 EventNumber,
|
||||
IN mlx_uint8 EventLength,
|
||||
IN mlx_uint8 DataLength,
|
||||
IN mlx_uint8 DataStartOffset
|
||||
);
|
||||
#endif /* MLX_OCBB_H_ */
|
|
@@ -31,11 +31,6 @@ typedef enum {
REG_ACCESS_WRITE = 2,
} REG_ACCESS_OPT;

typedef enum {
TLV_ACCESS_DEFAULT_DIS = 0,
TLV_ACCESS_DEFAULT_EN = 1,
} NV_DEFAULT_OPT;

#define REG_ID_NVDA 0x9024
#define REG_ID_NVDI 0x9025
#define REG_ID_NVIA 0x9029

@ -1,84 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) 2015 Mellanox Technologies Ltd.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation; either version 2 of the
|
||||
* License, or any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
||||
* 02110-1301, USA.
|
||||
*/
|
||||
|
||||
FILE_LICENCE ( GPL2_OR_LATER );
|
||||
|
||||
#include "mlx_wol_rol.h"
|
||||
#include "mlx_icmd.h"
|
||||
#include "mlx_memory.h"
|
||||
#include "mlx_bail.h"
|
||||
|
||||
mlx_status
|
||||
mlx_set_wol (
|
||||
IN mlx_utils *utils,
|
||||
IN mlx_uint8 wol_mask
|
||||
)
|
||||
{
|
||||
mlx_status status = MLX_SUCCESS;
|
||||
struct mlx_wol_rol wol_rol;
|
||||
|
||||
if (utils == NULL) {
|
||||
status = MLX_INVALID_PARAMETER;
|
||||
goto bad_param;
|
||||
}
|
||||
|
||||
mlx_memory_set(utils, &wol_rol, 0, sizeof(wol_rol));
|
||||
wol_rol.wol_mode_valid = TRUE;
|
||||
wol_rol.wol_mode = wol_mask;
|
||||
status = mlx_icmd_send_command(
|
||||
utils,
|
||||
SET_WOL_ROL,
|
||||
&wol_rol,
|
||||
sizeof(wol_rol),
|
||||
0
|
||||
);
|
||||
MLX_CHECK_STATUS(utils, status, icmd_err, "mlx_icmd_send_command failed");
|
||||
icmd_err:
|
||||
bad_param:
|
||||
return status;
|
||||
}
|
||||
|
||||
mlx_status
|
||||
mlx_query_wol (
|
||||
IN mlx_utils *utils,
|
||||
OUT mlx_uint8 *wol_mask
|
||||
)
|
||||
{
|
||||
mlx_status status = MLX_SUCCESS;
|
||||
struct mlx_wol_rol wol_rol;
|
||||
|
||||
if (utils == NULL || wol_mask == NULL) {
|
||||
status = MLX_INVALID_PARAMETER;
|
||||
goto bad_param;
|
||||
}
|
||||
|
||||
mlx_memory_set(utils, &wol_rol, 0, sizeof(wol_rol));
|
||||
status = mlx_icmd_send_command(
|
||||
utils,
|
||||
QUERY_WOL_ROL,
|
||||
&wol_rol,
|
||||
0,
|
||||
sizeof(wol_rol)
|
||||
);
|
||||
MLX_CHECK_STATUS(utils, status, icmd_err, "mlx_icmd_send_command failed");
|
||||
*wol_mask = wol_rol.wol_mode;
|
||||
icmd_err:
|
||||
bad_param:
|
||||
return status;
|
||||
}
|
|
@ -1,61 +0,0 @@
|
|||
#ifndef MLX_WOL_ROL_H_
|
||||
#define MLX_WOL_ROL_H_
|
||||
|
||||
/*
|
||||
* Copyright (C) 2015 Mellanox Technologies Ltd.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation; either version 2 of the
|
||||
* License, or any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
||||
* 02110-1301, USA.
|
||||
*/
|
||||
|
||||
FILE_LICENCE ( GPL2_OR_LATER );
|
||||
|
||||
|
||||
#include "mlx_utils.h"
|
||||
|
||||
typedef enum {
|
||||
WOL_MODE_DISABLE = 0x0,
|
||||
WOL_MODE_SECURE = 0x2,
|
||||
WOL_MODE_MAGIC = 0x4,
|
||||
WOL_MODE_ARP = 0x8,
|
||||
WOL_MODE_BC = 0x10,
|
||||
WOL_MODE_MC = 0x20,
|
||||
WOL_MODE_UC = 0x40,
|
||||
WOL_MODE_PHY = 0x80,
|
||||
} WOL_MODE;
|
||||
|
||||
struct mlx_wol_rol {
|
||||
mlx_uint32 reserved0 :32;
|
||||
mlx_uint32 reserved1 :32;
|
||||
mlx_uint32 wol_mode :8;
|
||||
mlx_uint32 rol_mode :8;
|
||||
mlx_uint32 reserved3 :14;
|
||||
mlx_uint32 wol_mode_valid :1;
|
||||
mlx_uint32 rol_mode_valid :1;
|
||||
};
|
||||
|
||||
mlx_status
|
||||
mlx_set_wol (
|
||||
IN mlx_utils *utils,
|
||||
IN mlx_uint8 wol_mask
|
||||
);
|
||||
|
||||
mlx_status
|
||||
mlx_query_wol (
|
||||
IN mlx_utils *utils,
|
||||
OUT mlx_uint8 *wol_mask
|
||||
);
|
||||
|
||||
#endif /* MLX_WOL_ROL_H_ */
|
|
@@ -1,9 +0,0 @@
MlxDebugLogImpl()
{
DBGC((DEBUG),"");
}
MlxInfoLogImpl()
{
DBGC((INFO),"");
}
}

@@ -107,7 +107,7 @@ mlx_pci_mem_read(
status = MLX_INVALID_PARAMETER;
goto bail;
}
status = mlx_pci_mem_read_priv(utils, bar_index, width, offset, count, buffer);
status = mlx_pci_mem_read_priv(utils, width,bar_index, offset, count, buffer);
bail:
return status;
}