aboutsummaryrefslogtreecommitdiffstats
path: root/src
diff options
context:
space:
mode:
author    Michael Brown <mcb30@ipxe.org>    2010-09-19 17:35:25 +0100
committer Michael Brown <mcb30@ipxe.org>    2010-09-21 02:22:01 +0100
commit  5a981cff8e449055bfd3f1a529d3411686ae163d (patch)
tree    6733b3ebb987f0c75abdedd7dc7fff201bf8d58f /src
parent  ed0ea7cfc2fbe63d501b999143271e28232252f6 (diff)
downloadipxe-5a981cff8e449055bfd3f1a529d3411686ae163d.tar.gz
[hermon] Add support for dual-protocol devices
Originally-implemented-by: Itay Gazit <itaygazit@gmail.com>
Signed-off-by: Michael Brown <mcb30@ipxe.org>
Diffstat (limited to 'src')
-rw-r--r--  src/drivers/infiniband/hermon.c  |  730
-rw-r--r--  src/drivers/infiniband/hermon.h  |   63
-rw-r--r--  src/drivers/net/mtnic.c          | 1853
-rw-r--r--  src/drivers/net/mtnic.h          |  722
4 files changed, 712 insertions, 2656 deletions
diff --git a/src/drivers/infiniband/hermon.c b/src/drivers/infiniband/hermon.c
index 2b8da122..60a5620b 100644
--- a/src/drivers/infiniband/hermon.c
+++ b/src/drivers/infiniband/hermon.c
@@ -36,6 +36,8 @@ FILE_LICENCE ( GPL2_OR_LATER );
#include <ipxe/netdevice.h>
#include <ipxe/infiniband.h>
#include <ipxe/ib_smc.h>
+#include <ipxe/if_ether.h>
+#include <ipxe/ethernet.h>
#include "hermon.h"
/**
@@ -304,12 +306,13 @@ hermon_cmd_close_port ( struct hermon *hermon, unsigned int port ) {
}
static inline int
-hermon_cmd_set_port ( struct hermon *hermon, unsigned int port,
- union hermonprm_set_port *set_port ) {
+hermon_cmd_set_port ( struct hermon *hermon, int is_ethernet,
+ unsigned int port_selector,
+ const union hermonprm_set_port *set_port ) {
return hermon_cmd ( hermon,
HERMON_HCR_IN_CMD ( HERMON_HCR_SET_PORT,
1, sizeof ( *set_port ) ),
- 0, set_port, port, NULL );
+ is_ethernet, set_port, port_selector, NULL );
}
static inline int
@@ -492,6 +495,24 @@ hermon_cmd_mgid_hash ( struct hermon *hermon, const union ib_gid *gid,
}
static inline int
+hermon_cmd_query_port ( struct hermon *hermon, unsigned int port,
+ struct hermonprm_query_port_cap *query_port ) {
+ return hermon_cmd ( hermon,
+ HERMON_HCR_OUT_CMD ( HERMON_HCR_QUERY_PORT,
+ 1, sizeof ( *query_port ) ),
+ 0, NULL, port, query_port );
+}
+
+static inline int
+hermon_cmd_sense_port ( struct hermon *hermon, unsigned int port,
+ struct hermonprm_sense_port *port_type ) {
+ return hermon_cmd ( hermon,
+ HERMON_HCR_OUT_CMD ( HERMON_HCR_SENSE_PORT,
+ 1, sizeof ( *port_type ) ),
+ 0, NULL, port, port_type );
+}
+
+static inline int
hermon_cmd_run_fw ( struct hermon *hermon ) {
return hermon_cmd ( hermon,
HERMON_HCR_VOID_CMD ( HERMON_HCR_RUN_FW ),
@@ -559,16 +580,6 @@ hermon_cmd_map_fa ( struct hermon *hermon,
0, map, 1, NULL );
}
-static inline int
-hermon_cmd_sense_port ( struct hermon *hermon, unsigned int port,
- struct hermonprm_sense_port *port_type ) {
- return hermon_cmd ( hermon,
- HERMON_HCR_OUT_CMD ( HERMON_HCR_SENSE_PORT,
- 1, sizeof ( *port_type ) ),
- 0, NULL, port, port_type );
-}
-
-
/***************************************************************************
*
* Memory translation table operations
@@ -916,6 +927,7 @@ static int hermon_alloc_qpn ( struct ib_device *ibdev,
return 0;
case IB_QPT_UD:
case IB_QPT_RC:
+ case IB_QPT_ETH:
/* Find a free queue pair number */
qpn_offset = hermon_bitmask_alloc ( hermon->qp_inuse,
HERMON_MAX_QPS, 1 );
@@ -982,6 +994,7 @@ static uint8_t hermon_qp_st[] = {
[IB_QPT_GSI] = HERMON_ST_MLX,
[IB_QPT_UD] = HERMON_ST_UD,
[IB_QPT_RC] = HERMON_ST_RC,
+ [IB_QPT_ETH] = HERMON_ST_MLX,
};
/**
@@ -1163,7 +1176,9 @@ static int hermon_modify_qp ( struct ib_device *ibdev,
if ( hermon_qp->state < HERMON_QP_ST_RTR ) {
memset ( &qpctx, 0, sizeof ( qpctx ) );
MLX_FILL_2 ( &qpctx, 4,
- qpc_eec_data.mtu, HERMON_MTU_2048,
+ qpc_eec_data.mtu,
+ ( ( qp->type == IB_QPT_ETH ) ?
+ HERMON_MTU_ETH : HERMON_MTU_2048 ),
qpc_eec_data.msg_max, 31 );
MLX_FILL_1 ( &qpctx, 7,
qpc_eec_data.remote_qpn_een, qp->av.qpn );
@@ -1402,6 +1417,38 @@ hermon_fill_rc_send_wqe ( struct ib_device *ibdev,
return HERMON_OPCODE_SEND;
}
+/**
+ * Construct Ethernet send work queue entry
+ *
+ * @v ibdev Infiniband device
+ * @v qp Queue pair
+ * @v av Address vector
+ * @v iobuf I/O buffer
+ * @v wqe Send work queue entry
+ * @ret opcode Control opcode
+ */
+static unsigned int
+hermon_fill_eth_send_wqe ( struct ib_device *ibdev,
+ struct ib_queue_pair *qp __unused,
+ struct ib_address_vector *av __unused,
+ struct io_buffer *iobuf,
+ union hermon_send_wqe *wqe ) {
+ struct hermon *hermon = ib_get_drvdata ( ibdev );
+
+ /* Fill work queue entry */
+ MLX_FILL_1 ( &wqe->eth.ctrl, 1, ds,
+ ( ( offsetof ( typeof ( wqe->mlx ), data[1] ) / 16 ) ) );
+ MLX_FILL_2 ( &wqe->eth.ctrl, 2,
+ c, 0x03 /* generate completion */,
+ s, 1 /* inhibit ICRC */ );
+ MLX_FILL_1 ( &wqe->eth.data[0], 0,
+ byte_count, iob_len ( iobuf ) );
+ MLX_FILL_1 ( &wqe->eth.data[0], 1, l_key, hermon->lkey );
+ MLX_FILL_1 ( &wqe->eth.data[0], 3,
+ local_address_l, virt_to_bus ( iobuf->data ) );
+ return HERMON_OPCODE_SEND;
+}
+
/** Work queue entry constructors */
static unsigned int
( * hermon_fill_send_wqe[] ) ( struct ib_device *ibdev,
@@ -1413,6 +1460,7 @@ static unsigned int
[IB_QPT_GSI] = hermon_fill_mlx_send_wqe,
[IB_QPT_UD] = hermon_fill_ud_send_wqe,
[IB_QPT_RC] = hermon_fill_rc_send_wqe,
+ [IB_QPT_ETH] = hermon_fill_eth_send_wqe,
};
/**
@@ -1618,6 +1666,9 @@ static int hermon_complete ( struct ib_device *ibdev,
case IB_QPT_RC:
av = &qp->av;
break;
+ case IB_QPT_ETH:
+ av = NULL;
+ break;
default:
assert ( 0 );
return -EINVAL;
@@ -1826,8 +1877,9 @@ static void hermon_event_port_state_change ( struct hermon *hermon,
return;
}
- /* Update MAD parameters */
- ib_smc_update ( hermon->ibdev[port], hermon_mad );
+ /* Notify device of port state change */
+ hermon->port[port].type->state_change ( hermon, &hermon->port[port],
+ link_up );
}
/**
@@ -1899,39 +1951,6 @@ static void hermon_poll_eq ( struct ib_device *ibdev ) {
*/
/**
- * Sense port type
- *
- * @v ibdev Infiniband device
- * @ret port_type Port type, or negative error
- */
-static int hermon_sense_port_type ( struct ib_device *ibdev ) {
- struct hermon *hermon = ib_get_drvdata ( ibdev );
- struct hermonprm_sense_port sense_port;
- int port_type;
- int rc;
-
- /* If DPDP is not supported, always assume Infiniband */
- if ( ! hermon->cap.dpdp ) {
- DBGC ( hermon, "Hermon %p does not support DPDP; assuming "
- "Infiniband\n", hermon );
- return HERMON_PORT_TYPE_IB;
- }
-
- /* Sense the port type */
- if ( ( rc = hermon_cmd_sense_port ( hermon, ibdev->port,
- &sense_port ) ) != 0 ) {
- DBGC ( hermon, "Hermon %p port %d sense failed: %s\n",
- hermon, ibdev->port, strerror ( rc ) );
- return rc;
- }
- port_type = MLX_GET ( &sense_port, port_type );
-
- DBGC ( hermon, "Hermon %p port %d type %d\n",
- hermon, ibdev->port, port_type );
- return port_type;
-}
-
-/**
* Initialise Infiniband link
*
* @v ibdev Infiniband device
@@ -1940,18 +1959,8 @@ static int hermon_sense_port_type ( struct ib_device *ibdev ) {
static int hermon_open ( struct ib_device *ibdev ) {
struct hermon *hermon = ib_get_drvdata ( ibdev );
union hermonprm_set_port set_port;
- int port_type;
int rc;
- /* Check we are connected to an Infiniband network */
- if ( ( rc = port_type = hermon_sense_port_type ( ibdev ) ) < 0 )
- return rc;
- if ( port_type != HERMON_PORT_TYPE_IB ) {
- DBGC ( hermon, "Hermon %p port %d not connected to an "
- "Infiniband network", hermon, ibdev->port );
- return -ENOTCONN;
- }
-
/* Set port parameters */
memset ( &set_port, 0, sizeof ( set_port ) );
MLX_FILL_7 ( &set_port.ib, 0,
@@ -1965,7 +1974,7 @@ static int hermon_open ( struct ib_device *ibdev ) {
MLX_FILL_2 ( &set_port.ib, 10,
max_pkey, 1,
max_gid, 1 );
- if ( ( rc = hermon_cmd_set_port ( hermon, ibdev->port,
+ if ( ( rc = hermon_cmd_set_port ( hermon, 0, ibdev->port,
&set_port ) ) != 0 ) {
DBGC ( hermon, "Hermon %p port %d could not set port: %s\n",
hermon, ibdev->port, strerror ( rc ) );
@@ -2136,6 +2145,530 @@ static struct ib_device_operations hermon_ib_operations = {
.set_pkey_table = hermon_inform_sma,
};
+/**
+ * Register Hermon Infiniband device
+ *
+ * @v hermon Hermon device
+ * @v port Hermon port
+ * @ret rc Return status code
+ */
+static int hermon_register_ibdev ( struct hermon *hermon,
+ struct hermon_port *port ) {
+ struct ib_device *ibdev = port->ibdev;
+ int rc;
+
+ /* Initialise parameters using SMC */
+ ib_smc_init ( ibdev, hermon_mad );
+
+ /* Register Infiniband device */
+ if ( ( rc = register_ibdev ( ibdev ) ) != 0 ) {
+ DBGC ( hermon, "Hermon %p port %d could not register IB "
+ "device: %s\n", hermon, ibdev->port, strerror ( rc ) );
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * Handle Hermon Infiniband device port state change
+ *
+ * @v hermon Hermon device
+ * @v port Hermon port
+ * @v link_up Link is up
+ */
+static void hermon_state_change_ibdev ( struct hermon *hermon __unused,
+ struct hermon_port *port,
+ int link_up __unused ) {
+ struct ib_device *ibdev = port->ibdev;
+
+ /* Update MAD parameters */
+ ib_smc_update ( ibdev, hermon_mad );
+}
+
+/**
+ * Unregister Hermon Infiniband device
+ *
+ * @v hermon Hermon device
+ * @v port Hermon port
+ */
+static void hermon_unregister_ibdev ( struct hermon *hermon __unused,
+ struct hermon_port *port ) {
+ struct ib_device *ibdev = port->ibdev;
+
+ unregister_ibdev ( ibdev );
+}
+
+/** Hermon Infiniband port type */
+static struct hermon_port_type hermon_port_type_ib = {
+ .register_dev = hermon_register_ibdev,
+ .state_change = hermon_state_change_ibdev,
+ .unregister_dev = hermon_unregister_ibdev,
+};
+
+/***************************************************************************
+ *
+ * Ethernet operation
+ *
+ ***************************************************************************
+ */
+
+/** Number of Hermon Ethernet send work queue entries */
+#define HERMON_ETH_NUM_SEND_WQES 2
+
+/** Number of Hermon Ethernet receive work queue entries */
+#define HERMON_ETH_NUM_RECV_WQES 4
+
+/** Number of Hermon Ethernet completion entries */
+#define HERMON_ETH_NUM_CQES 8
+
+/**
+ * Transmit packet via Hermon Ethernet device
+ *
+ * @v netdev Network device
+ * @v iobuf I/O buffer
+ * @ret rc Return status code
+ */
+static int hermon_eth_transmit ( struct net_device *netdev,
+ struct io_buffer *iobuf ) {
+ struct hermon_port *port = netdev->priv;
+ struct ib_device *ibdev = port->ibdev;
+ struct hermon *hermon = ib_get_drvdata ( ibdev );
+ int rc;
+
+ /* Transmit packet */
+ if ( ( rc = ib_post_send ( ibdev, port->eth_qp, NULL,
+ iobuf ) ) != 0 ) {
+ DBGC ( hermon, "Hermon %p port %d could not transmit: %s\n",
+ hermon, ibdev->port, strerror ( rc ) );
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * Handle Hermon Ethernet device send completion
+ *
+ * @v ibdev Infiniband device
+ * @v qp Queue pair
+ * @v iobuf I/O buffer
+ * @v rc Completion status code
+ */
+static void hermon_eth_complete_send ( struct ib_device *ibdev __unused,
+ struct ib_queue_pair *qp,
+ struct io_buffer *iobuf, int rc ) {
+ struct net_device *netdev = ib_qp_get_ownerdata ( qp );
+
+ netdev_tx_complete_err ( netdev, iobuf, rc );
+}
+
+/**
+ * Handle Hermon Ethernet device receive completion
+ *
+ * @v ibdev Infiniband device
+ * @v qp Queue pair
+ * @v av Address vector, or NULL
+ * @v iobuf I/O buffer
+ * @v rc Completion status code
+ */
+static void hermon_eth_complete_recv ( struct ib_device *ibdev __unused,
+ struct ib_queue_pair *qp,
+ struct ib_address_vector *av __unused,
+ struct io_buffer *iobuf, int rc ) {
+ struct net_device *netdev = ib_qp_get_ownerdata ( qp );
+
+ /* Hand off to network layer */
+ if ( rc == 0 ) {
+ netdev_rx ( netdev, iobuf );
+ } else {
+ netdev_rx_err ( netdev, iobuf, rc );
+ }
+}
+
+/** Hermon Ethernet device completion operations */
+static struct ib_completion_queue_operations hermon_eth_cq_op = {
+ .complete_send = hermon_eth_complete_send,
+ .complete_recv = hermon_eth_complete_recv,
+};
+
+/**
+ * Poll Hermon Ethernet device
+ *
+ * @v netdev Network device
+ */
+static void hermon_eth_poll ( struct net_device *netdev ) {
+ struct hermon_port *port = netdev->priv;
+ struct ib_device *ibdev = port->ibdev;
+
+ ib_poll_eq ( ibdev );
+}
+
+/**
+ * Enable/disable interrupts on Hermon Ethernet device
+ *
+ * @v netdev Network device
+ * @v enable Interrupts should be enabled
+ */
+static void hermon_eth_irq ( struct net_device *netdev __unused,
+ int enable __unused ) {
+ /* No implementation */
+}
+
+/**
+ * Open Hermon Ethernet device
+ *
+ * @v netdev Network device
+ * @ret rc Return status code
+ */
+static int hermon_eth_open ( struct net_device *netdev ) {
+ struct hermon_port *port = netdev->priv;
+ struct ib_device *ibdev = port->ibdev;
+ struct hermon *hermon = ib_get_drvdata ( ibdev );
+ union hermonprm_set_port set_port;
+ int rc;
+
+ /* Allocate completion queue */
+ port->eth_cq = ib_create_cq ( ibdev, HERMON_ETH_NUM_CQES,
+ &hermon_eth_cq_op );
+ if ( ! port->eth_cq ) {
+ DBGC ( hermon, "Hermon %p port %d could not create completion "
+ "queue\n", hermon, ibdev->port );
+ rc = -ENOMEM;
+ goto err_create_cq;
+ }
+
+ /* Allocate queue pair */
+ port->eth_qp = ib_create_qp ( ibdev, IB_QPT_ETH,
+ HERMON_ETH_NUM_SEND_WQES, port->eth_cq,
+ HERMON_ETH_NUM_RECV_WQES, port->eth_cq );
+ if ( ! port->eth_qp ) {
+ DBGC ( hermon, "Hermon %p port %d could not create queue "
+ "pair\n", hermon, ibdev->port );
+ rc = -ENOMEM;
+ goto err_create_qp;
+ }
+ ib_qp_set_ownerdata ( port->eth_qp, netdev );
+
+ /* Activate queue pair */
+ if ( ( rc = ib_modify_qp ( ibdev, port->eth_qp ) ) != 0 ) {
+ DBGC ( hermon, "Hermon %p port %d could not modify queue "
+ "pair: %s\n", hermon, ibdev->port, strerror ( rc ) );
+ goto err_modify_qp;
+ }
+
+ /* Fill receive rings */
+ ib_refill_recv ( ibdev, port->eth_qp );
+
+ /* Set port general parameters */
+ memset ( &set_port, 0, sizeof ( set_port ) );
+ MLX_FILL_3 ( &set_port.general, 0,
+ v_mtu, 1,
+ v_pprx, 1,
+ v_pptx, 1 );
+ MLX_FILL_1 ( &set_port.general, 1,
+ mtu, ( ETH_FRAME_LEN + 40 /* Used by card */ ) );
+ MLX_FILL_1 ( &set_port.general, 2, pptx, 1 );
+ MLX_FILL_1 ( &set_port.general, 3, pprx, 1 );
+ if ( ( rc = hermon_cmd_set_port ( hermon, 1,
+ ( HERMON_SET_PORT_GENERAL_PARAM |
+ ibdev->port ),
+ &set_port ) ) != 0 ) {
+ DBGC ( hermon, "Hermon %p port %d could not set port general "
+ "parameters: %s\n",
+ hermon, ibdev->port, strerror ( rc ) );
+ goto err_set_port_general_params;
+ }
+
+ /* Set port receive QP */
+ memset ( &set_port, 0, sizeof ( set_port ) );
+ MLX_FILL_1 ( &set_port.rqp_calc, 0, base_qpn, port->eth_qp->qpn );
+ MLX_FILL_1 ( &set_port.rqp_calc, 2,
+ mac_miss_index, 128 /* MAC misses go to promisc QP */ );
+ MLX_FILL_2 ( &set_port.rqp_calc, 3,
+ vlan_miss_index, 127 /* VLAN misses go to promisc QP */,
+ no_vlan_index, 126 /* VLAN-free go to promisc QP */ );
+ MLX_FILL_2 ( &set_port.rqp_calc, 5,
+ promisc_qpn, port->eth_qp->qpn,
+ en_uc_promisc, 1 );
+ MLX_FILL_2 ( &set_port.rqp_calc, 6,
+ def_mcast_qpn, port->eth_qp->qpn,
+ mc_promisc_mode, 2 /* Receive all multicasts */ );
+ if ( ( rc = hermon_cmd_set_port ( hermon, 1,
+ ( HERMON_SET_PORT_RECEIVE_QP |
+ ibdev->port ),
+ &set_port ) ) != 0 ) {
+ DBGC ( hermon, "Hermon %p port %d could not set port receive "
+ "QP: %s\n", hermon, ibdev->port, strerror ( rc ) );
+ goto err_set_port_receive_qp;
+ }
+
+ /* Initialise port */
+ if ( ( rc = hermon_cmd_init_port ( hermon, ibdev->port ) ) != 0 ) {
+ DBGC ( hermon, "Hermon %p port %d could not initialise port: "
+ "%s\n", hermon, ibdev->port, strerror ( rc ) );
+ goto err_init_port;
+ }
+
+ return 0;
+
+ err_init_port:
+ err_set_port_receive_qp:
+ err_set_port_general_params:
+ err_modify_qp:
+ ib_destroy_qp ( ibdev, port->eth_qp );
+ err_create_qp:
+ ib_destroy_cq ( ibdev, port->eth_cq );
+ err_create_cq:
+ return rc;
+}
+
+/**
+ * Close Hermon Ethernet device
+ *
+ * @v netdev Network device
+ */
+static void hermon_eth_close ( struct net_device *netdev ) {
+ struct hermon_port *port = netdev->priv;
+ struct ib_device *ibdev = port->ibdev;
+ struct hermon *hermon = ib_get_drvdata ( ibdev );
+ int rc;
+
+ /* Close port */
+ if ( ( rc = hermon_cmd_close_port ( hermon, ibdev->port ) ) != 0 ) {
+ DBGC ( hermon, "Hermon %p port %d could not close port: %s\n",
+ hermon, ibdev->port, strerror ( rc ) );
+ /* Nothing we can do about this */
+ }
+
+ /* Tear down the queues */
+ ib_destroy_qp ( ibdev, port->eth_qp );
+ ib_destroy_cq ( ibdev, port->eth_cq );
+}
+
+/** Hermon Ethernet network device operations */
+static struct net_device_operations hermon_eth_operations = {
+ .open = hermon_eth_open,
+ .close = hermon_eth_close,
+ .transmit = hermon_eth_transmit,
+ .poll = hermon_eth_poll,
+ .irq = hermon_eth_irq,
+};
+
+/**
+ * Register Hermon Ethernet device
+ *
+ * @v hermon Hermon device
+ * @v port Hermon port
+ * @ret rc Return status code
+ */
+static int hermon_register_netdev ( struct hermon *hermon,
+ struct hermon_port *port ) {
+ struct net_device *netdev = port->netdev;
+ struct ib_device *ibdev = port->ibdev;
+ struct hermonprm_query_port_cap query_port;
+ union {
+ uint8_t bytes[8];
+ uint32_t dwords[2];
+ } mac;
+ int rc;
+
+ /* Retrieve MAC address */
+ if ( ( rc = hermon_cmd_query_port ( hermon, ibdev->port,
+ &query_port ) ) != 0 ) {
+ DBGC ( hermon, "Hermon %p port %d could not query port: %s\n",
+ hermon, ibdev->port, strerror ( rc ) );
+ return rc;
+ }
+ mac.dwords[0] = htonl ( MLX_GET ( &query_port, mac_47_32 ) );
+ mac.dwords[1] = htonl ( MLX_GET ( &query_port, mac_31_0 ) );
+ memcpy ( netdev->hw_addr,
+ &mac.bytes[ sizeof ( mac.bytes ) - ETH_ALEN ], ETH_ALEN );
+
+ /* Register network device */
+ if ( ( rc = register_netdev ( netdev ) ) != 0 ) {
+ DBGC ( hermon, "Hermon %p port %d could not register network "
+ "device: %s\n", hermon, ibdev->port, strerror ( rc ) );
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * Handle Hermon Ethernet device port state change
+ *
+ * @v hermon Hermon device
+ * @v port Hermon port
+ * @v link_up Link is up
+ */
+static void hermon_state_change_netdev ( struct hermon *hermon __unused,
+ struct hermon_port *port,
+ int link_up ) {
+ struct net_device *netdev = port->netdev;
+
+ if ( link_up ) {
+ netdev_link_up ( netdev );
+ } else {
+ netdev_link_down ( netdev );
+ }
+}
+
+/**
+ * Unregister Hermon Ethernet device
+ *
+ * @v hermon Hermon device
+ * @v port Hermon port
+ */
+static void hermon_unregister_netdev ( struct hermon *hermon __unused,
+ struct hermon_port *port ) {
+ struct net_device *netdev = port->netdev;
+
+ unregister_netdev ( netdev );
+}
+
+/** Hermon Ethernet port type */
+static struct hermon_port_type hermon_port_type_eth = {
+ .register_dev = hermon_register_netdev,
+ .state_change = hermon_state_change_netdev,
+ .unregister_dev = hermon_unregister_netdev,
+};
+
+/***************************************************************************
+ *
+ * Port type detection
+ *
+ ***************************************************************************
+ */
+
+/** Timeout for port sensing */
+#define HERMON_SENSE_PORT_TIMEOUT ( TICKS_PER_SEC / 2 )
+
+/**
+ * Name port type
+ *
+ * @v port_type Port type
+ * @v port_type_name Port type name
+ */
+static inline const char * hermon_name_port_type ( unsigned int port_type ) {
+ switch ( port_type ) {
+ case HERMON_PORT_TYPE_UNKNOWN: return "unknown";
+ case HERMON_PORT_TYPE_IB: return "Infiniband";
+ case HERMON_PORT_TYPE_ETH: return "Ethernet";
+ default: return "INVALID";
+ }
+}
+
+/**
+ * Sense port type
+ *
+ * @v hermon Hermon device
+ * @v port Hermon port
+ * @ret port_type Port type, or negative error
+ */
+static int hermon_sense_port_type ( struct hermon *hermon,
+ struct hermon_port *port ) {
+ struct ib_device *ibdev = port->ibdev;
+ struct hermonprm_sense_port sense_port;
+ int port_type;
+ int rc;
+
+ /* If DPDP is not supported, always assume Infiniband */
+ if ( ! hermon->cap.dpdp ) {
+ port_type = HERMON_PORT_TYPE_IB;
+ DBGC ( hermon, "Hermon %p port %d does not support DPDP; "
+ "assuming an %s network\n", hermon, ibdev->port,
+ hermon_name_port_type ( port_type ) );
+ return port_type;
+ }
+
+ /* Sense the port type */
+ if ( ( rc = hermon_cmd_sense_port ( hermon, ibdev->port,
+ &sense_port ) ) != 0 ) {
+ DBGC ( hermon, "Hermon %p port %d sense failed: %s\n",
+ hermon, ibdev->port, strerror ( rc ) );
+ return rc;
+ }
+ port_type = MLX_GET ( &sense_port, port_type );
+
+ DBGC ( hermon, "Hermon %p port %d sensed an %s network\n",
+ hermon, ibdev->port, hermon_name_port_type ( port_type ) );
+ return port_type;
+}
+
+/**
+ * Set port type
+ *
+ * @v hermon Hermon device
+ * @v port Hermon port
+ * @ret rc Return status code
+ */
+static int hermon_set_port_type ( struct hermon *hermon,
+ struct hermon_port *port ) {
+ struct ib_device *ibdev = port->ibdev;
+ struct hermonprm_query_port_cap query_port;
+ int ib_supported;
+ int eth_supported;
+ int port_type;
+ unsigned long start;
+ unsigned long elapsed;
+ int rc;
+
+ /* Check to see which types are supported */
+ if ( ( rc = hermon_cmd_query_port ( hermon, ibdev->port,
+ &query_port ) ) != 0 ) {
+ DBGC ( hermon, "Hermon %p port %d could not query port: %s\n",
+ hermon, ibdev->port, strerror ( rc ) );
+ return rc;
+ }
+ ib_supported = MLX_GET ( &query_port, ib );
+ eth_supported = MLX_GET ( &query_port, eth );
+ DBGC ( hermon, "Hermon %p port %d supports%s%s%s\n",
+ hermon, ibdev->port, ( ib_supported ? " Infiniband" : "" ),
+ ( ( ib_supported && eth_supported ) ? " and" : "" ),
+ ( eth_supported ? " Ethernet" : "" ) );
+
+ /* Sense network, if applicable */
+ if ( ib_supported && eth_supported ) {
+
+ /* Both types are supported; try sensing network */
+ start = currticks();
+ do {
+ /* Try sensing port */
+ port_type = hermon_sense_port_type ( hermon, port );
+ if ( port_type < 0 ) {
+ rc = port_type;
+ return rc;
+ }
+ } while ( ( port_type == HERMON_PORT_TYPE_UNKNOWN ) &&
+ ( ( elapsed = ( currticks() - start ) ) <
+ HERMON_SENSE_PORT_TIMEOUT ) );
+
+ /* Set port type based on sensed network, defaulting
+ * to Infiniband if nothing was sensed.
+ */
+ switch ( port_type ) {
+ case HERMON_PORT_TYPE_ETH:
+ port->type = &hermon_port_type_eth;
+ break;
+ case HERMON_PORT_TYPE_IB:
+ case HERMON_PORT_TYPE_UNKNOWN:
+ port->type = &hermon_port_type_ib;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ } else if ( eth_supported ) {
+ port->type = &hermon_port_type_eth;
+ } else {
+ port->type = &hermon_port_type_ib;
+ }
+
+ assert ( port->type != NULL );
+ return 0;
+}
+
/***************************************************************************
*
* Firmware control
@@ -2789,6 +3322,8 @@ static int hermon_probe ( struct pci_device *pci,
const struct pci_device_id *id __unused ) {
struct hermon *hermon;
struct ib_device *ibdev;
+ struct net_device *netdev;
+ struct hermon_port *port;
struct hermonprm_init_hca init_hca;
unsigned int i;
int rc;
@@ -2842,13 +3377,26 @@ static int hermon_probe ( struct pci_device *pci,
rc = -ENOMEM;
goto err_alloc_ibdev;
}
- hermon->ibdev[i] = ibdev;
+ hermon->port[i].ibdev = ibdev;
ibdev->op = &hermon_ib_operations;
ibdev->dev = &pci->dev;
ibdev->port = ( HERMON_PORT_BASE + i );
ib_set_drvdata ( ibdev, hermon );
}
+ /* Allocate network devices */
+ for ( i = 0 ; i < hermon->cap.num_ports ; i++ ) {
+ netdev = alloc_etherdev ( 0 );
+ if ( ! netdev ) {
+ rc = -ENOMEM;
+ goto err_alloc_netdev;
+ }
+ hermon->port[i].netdev = netdev;
+ netdev_init ( netdev, &hermon_eth_operations );
+ netdev->dev = &pci->dev;
+ netdev->priv = &hermon->port[i];
+ }
+
/* Allocate ICM */
memset ( &init_hca, 0, sizeof ( init_hca ) );
if ( ( rc = hermon_alloc_icm ( hermon, &init_hca ) ) != 0 )
@@ -2868,7 +3416,7 @@ static int hermon_probe ( struct pci_device *pci,
if ( ( rc = hermon_setup_mpt ( hermon ) ) != 0 )
goto err_setup_mpt;
for ( i = 0 ; i < hermon->cap.num_ports ; i++ )
- hermon->ibdev[i]->rdma_key = hermon->lkey;
+ hermon->port[i].ibdev->rdma_key = hermon->lkey;
/* Set up event queue */
if ( ( rc = hermon_create_eq ( hermon ) ) != 0 )
@@ -2878,26 +3426,29 @@ static int hermon_probe ( struct pci_device *pci,
if ( ( rc = hermon_configure_special_qps ( hermon ) ) != 0 )
goto err_conf_special_qps;
- /* Initialise parameters using SMC */
- for ( i = 0 ; i < hermon->cap.num_ports ; i++ )
- ib_smc_init ( hermon->ibdev[i], hermon_mad );
+ /* Determine port types */
+ for ( i = 0 ; i < hermon->cap.num_ports ; i++ ) {
+ port = &hermon->port[i];
+ if ( ( rc = hermon_set_port_type ( hermon, port ) ) != 0 )
+ goto err_set_port_type;
+ }
- /* Register Infiniband devices */
+ /* Register devices */
for ( i = 0 ; i < hermon->cap.num_ports ; i++ ) {
- if ( ( rc = register_ibdev ( hermon->ibdev[i] ) ) != 0 ) {
- DBGC ( hermon, "Hermon %p port %d could not register "
- "IB device: %s\n", hermon,
- hermon->ibdev[i]->port, strerror ( rc ) );
- goto err_register_ibdev;
- }
+ port = &hermon->port[i];
+ if ( ( rc = port->type->register_dev ( hermon, port ) ) != 0 )
+ goto err_register;
}
return 0;
i = hermon->cap.num_ports;
- err_register_ibdev:
- for ( i-- ; ( signed int ) i >= 0 ; i-- )
- unregister_ibdev ( hermon->ibdev[i] );
+ err_register:
+ for ( i-- ; ( signed int ) i >= 0 ; i-- ) {
+ port = &hermon->port[i];
+ port->type->unregister_dev ( hermon, port );
+ }
+ err_set_port_type:
err_conf_special_qps:
hermon_destroy_eq ( hermon );
err_create_eq:
@@ -2907,9 +3458,15 @@ static int hermon_probe ( struct pci_device *pci,
hermon_free_icm ( hermon );
err_alloc_icm:
i = hermon->cap.num_ports;
+ err_alloc_netdev:
+ for ( i-- ; ( signed int ) i >= 0 ; i-- ) {
+ netdev_nullify ( hermon->port[i].netdev );
+ netdev_put ( hermon->port[i].netdev );
+ }
+ i = hermon->cap.num_ports;
err_alloc_ibdev:
for ( i-- ; ( signed int ) i >= 0 ; i-- )
- ibdev_put ( hermon->ibdev[i] );
+ ibdev_put ( hermon->port[i].ibdev );
err_get_cap:
hermon_stop_firmware ( hermon );
err_start_firmware:
@@ -2929,10 +3486,13 @@ static int hermon_probe ( struct pci_device *pci,
*/
static void hermon_remove ( struct pci_device *pci ) {
struct hermon *hermon = pci_get_drvdata ( pci );
+ struct hermon_port *port;
int i;
- for ( i = ( hermon->cap.num_ports - 1 ) ; i >= 0 ; i-- )
- unregister_ibdev ( hermon->ibdev[i] );
+ for ( i = ( hermon->cap.num_ports - 1 ) ; i >= 0 ; i-- ) {
+ port = &hermon->port[i];
+ port->type->unregister_dev ( hermon, port );
+ }
hermon_destroy_eq ( hermon );
hermon_cmd_close_hca ( hermon );
hermon_free_icm ( hermon );
@@ -2940,8 +3500,12 @@ static void hermon_remove ( struct pci_device *pci ) {
hermon_stop_firmware ( hermon );
free_dma ( hermon->mailbox_out, HERMON_MBOX_SIZE );
free_dma ( hermon->mailbox_in, HERMON_MBOX_SIZE );
+ for ( i = ( hermon->cap.num_ports - 1 ) ; i >= 0 ; i-- ) {
+ netdev_nullify ( hermon->port[i].netdev );
+ netdev_put ( hermon->port[i].netdev );
+ }
for ( i = ( hermon->cap.num_ports - 1 ) ; i >= 0 ; i-- )
- ibdev_put ( hermon->ibdev[i] );
+ ibdev_put ( hermon->port[i].ibdev );
free ( hermon );
}
@@ -2950,6 +3514,14 @@ static struct pci_device_id hermon_nics[] = {
PCI_ROM ( 0x15b3, 0x634a, "mt25418", "MT25418 HCA driver", 0 ),
PCI_ROM ( 0x15b3, 0x6732, "mt26418", "MT26418 HCA driver", 0 ),
PCI_ROM ( 0x15b3, 0x673c, "mt26428", "MT26428 HCA driver", 0 ),
+ PCI_ROM ( 0x15b3, 0x6746, "mt26438", "MT26438 HCA driver", 0 ),
+ PCI_ROM ( 0x15b3, 0x6778, "mt26488", "MT26488 HCA driver", 0 ),
+ PCI_ROM ( 0x15b3, 0x6368, "mt25448", "MT25448 HCA driver", 0 ),
+ PCI_ROM ( 0x15b3, 0x6750, "mt26448", "MT26448 HCA driver", 0 ),
+ PCI_ROM ( 0x15b3, 0x6372, "mt25458", "MT25458 HCA driver", 0 ),
+ PCI_ROM ( 0x15b3, 0x675a, "mt26458", "MT26458 HCA driver", 0 ),
+ PCI_ROM ( 0x15b3, 0x6764, "mt26468", "MT26468 HCA driver", 0 ),
+ PCI_ROM ( 0x15b3, 0x676e, "mt26478", "MT26478 HCA driver", 0 ),
};
struct pci_driver hermon_driver __pci_driver = {
diff --git a/src/drivers/infiniband/hermon.h b/src/drivers/infiniband/hermon.h
index a0f4a257..7f00efcd 100644
--- a/src/drivers/infiniband/hermon.h
+++ b/src/drivers/infiniband/hermon.h
@@ -94,6 +94,7 @@ FILE_LICENCE ( GPL2_OR_LATER );
/* MTUs */
#define HERMON_MTU_2048 0x04
+#define HERMON_MTU_ETH 0x07
#define HERMON_INVALID_LKEY 0x00000100UL
@@ -110,6 +111,13 @@ FILE_LICENCE ( GPL2_OR_LATER );
#define HERMON_MAP_EQ ( 0UL << 31 )
#define HERMON_UNMAP_EQ ( 1UL << 31 )
+#define HERMON_SET_PORT_GENERAL_PARAM 0x0000
+#define HERMON_SET_PORT_RECEIVE_QP 0x0100
+#define HERMON_SET_PORT_MAC_TABLE 0x0200
+#define HERMON_SET_PORT_VLAN_TABLE 0x0300
+#define HERMON_SET_PORT_PRIORITY_TABLE 0x0400
+#define HERMON_SET_PORT_GID_TABLE 0x0500
+
#define HERMON_EV_PORT_STATE_CHANGE 0x09
#define HERMON_SCHED_QP0 0x3f
@@ -449,6 +457,11 @@ struct hermonprm_rc_send_wqe {
struct hermonprm_wqe_segment_data_ptr data[HERMON_MAX_GATHER];
} __attribute__ (( packed ));
+struct hermonprm_eth_send_wqe {
+ struct hermonprm_wqe_segment_ctrl_send ctrl;
+ struct hermonprm_wqe_segment_data_ptr data[HERMON_MAX_GATHER];
+} __attribute__ (( packed ));
+
#define HERMON_MAX_SCATTER 1
struct hermonprm_recv_wqe {
@@ -584,6 +597,7 @@ union hermon_send_wqe {
struct hermonprm_ud_send_wqe ud;
struct hermonprm_mlx_send_wqe mlx;
struct hermonprm_rc_send_wqe rc;
+ struct hermonprm_eth_send_wqe eth;
uint8_t force_align[HERMON_SEND_WQE_ALIGN];
} __attribute__ (( packed ));
@@ -720,6 +734,51 @@ typedef uint32_t hermon_bitmask_t;
( ( (max_entries) + ( 8 * sizeof ( hermon_bitmask_t ) ) - 1 ) / \
( 8 * sizeof ( hermon_bitmask_t ) ) )
+struct hermon;
+struct hermon_port;
+
+/** A Hermon port type */
+struct hermon_port_type {
+ /** Register port
+ *
+ * @v hermon Hermon device
+ * @v port Hermon port
+ * @ret rc Return status code
+ */
+ int ( * register_dev ) ( struct hermon *hermon,
+ struct hermon_port *port );
+ /** Port state changed
+ *
+ * @v hermon Hermon device
+ * @v port Hermon port
+ * @v link_up Link is up
+ */
+ void ( * state_change ) ( struct hermon *hermon,
+ struct hermon_port *port,
+ int link_up );
+ /** Unregister port
+ *
+ * @v hermon Hermon device
+ * @v port Hermon port
+ */
+ void ( * unregister_dev ) ( struct hermon *hermon,
+ struct hermon_port *port );
+};
+
+/** A Hermon port */
+struct hermon_port {
+ /** Infiniband device */
+ struct ib_device *ibdev;
+ /** Network device */
+ struct net_device *netdev;
+ /** Ethernet completion queue */
+ struct ib_completion_queue *eth_cq;
+ /** Ethernet queue pair */
+ struct ib_queue_pair *eth_qp;
+ /** Port type */
+ struct hermon_port_type *type;
+};
+
/** A Hermon device */
struct hermon {
/** PCI configuration registers */
@@ -763,8 +822,8 @@ struct hermon {
/** QPN base */
unsigned long qpn_base;
- /** Infiniband devices */
- struct ib_device *ibdev[HERMON_MAX_PORTS];
+ /** Ports */
+ struct hermon_port port[HERMON_MAX_PORTS];
};
/** Global protection domain */
diff --git a/src/drivers/net/mtnic.c b/src/drivers/net/mtnic.c
deleted file mode 100644
index d0da2bdc..00000000
--- a/src/drivers/net/mtnic.c
+++ /dev/null
@@ -1,1853 +0,0 @@
-/*
- * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-FILE_LICENCE ( GPL2_ONLY );
-
-#include <strings.h>
-#include <errno.h>
-#include <ipxe/malloc.h>
-#include <ipxe/umalloc.h>
-#include <byteswap.h>
-#include <unistd.h>
-#include <ipxe/io.h>
-#include <ipxe/pci.h>
-#include <ipxe/ethernet.h>
-#include <ipxe/netdevice.h>
-#include <ipxe/iobuf.h>
-#include "mtnic.h"
-
-
-/*
-
-
- mtnic.c - iPXE driver for Mellanox 10Gig ConnectX EN
-
-
-*/
-
-
-
-/********************************************************************
-*
-* MTNIC allocation functions
-*
-*********************************************************************/
-/**
-* mtnic_alloc_aligned
-*
-* @v unsigned int size size
-* @v void **va virtual address
-* @v u32 *pa physical address
-* @v u32 aligment aligment
-*
-* Function allocate aligned buffer and put it's virtual address in 'va'
-* and it's physical aligned address in 'pa'
-*/
-static int
-mtnic_alloc_aligned(unsigned int size, void **va, unsigned long *pa, unsigned int alignment)
-{
- *va = malloc_dma(size, alignment);
- if (!*va) {
- return -EADDRINUSE;
- }
- *pa = (u32)virt_to_bus(*va);
- return 0;
-}
-
-
-
-/**
- *
- * mtnic alloc command interface
- *
- */
-static int
-mtnic_alloc_cmdif(struct mtnic *mtnic)
-{
- u32 bar = mtnic_pci_dev.dev.bar[0];
-
- mtnic->hcr = ioremap(bar + MTNIC_HCR_BASE, MTNIC_HCR_SIZE);
- if ( !mtnic->hcr ) {
- DBG("Couldn't map command register\n");
- return -EADDRINUSE;
- }
- mtnic_alloc_aligned(PAGE_SIZE, (void *)&mtnic->cmd.buf, &mtnic->cmd.mapping, PAGE_SIZE);
- if ( !mtnic->cmd.buf ) {
- DBG("Error in allocating buffer for command interface\n");
- return -EADDRINUSE;
- }
- return 0;
-}
-
-/**
- * Free RX io buffers
- */
-static void
-mtnic_free_io_buffers(struct mtnic_ring *ring)
-{
- int index;
-
- for (; ring->cons <= ring->prod; ++ring->cons) {
- index = ring->cons & ring->size_mask;
- if ( ring->iobuf[index] ) {
- free_iob(ring->iobuf[index]);
- }
- }
-}
-
-
-
-/**
- *
- * mtnic alloc and attach io buffers
- *
- */
-static int
-mtnic_alloc_iobuf(struct mtnic_port *priv, struct mtnic_ring *ring,
- unsigned int size)
-{
- struct mtnic_rx_desc *rx_desc_ptr = ring->buf;
- u32 index;
-
- while ((u32)(ring->prod - ring->cons) < UNITS_BUFFER_SIZE) {
- index = ring->prod & ring->size_mask;
- ring->iobuf[index] = alloc_iob(size);
- if (!ring->iobuf[index]) {
- if (ring->prod <= (ring->cons + 1)) {
- DBG ( "Dropping packet, buffer is full\n" );
- }
- break;
- }
-
- /* Attach io_buffer to descriptor */
- rx_desc_ptr = ring->buf +
- (sizeof(struct mtnic_rx_desc) * index);
- rx_desc_ptr->data.count = cpu_to_be32(size);
- rx_desc_ptr->data.mem_type = priv->mtnic->fw.mem_type_snoop_be;
- rx_desc_ptr->data.addr_l = cpu_to_be32(
- virt_to_bus(ring->iobuf[index]->data));
-
- ++ ring->prod;
- }
-
- /* Update RX producer index (PI) */
- ring->db->count = cpu_to_be32(ring->prod & 0xffff);
- return 0;
-}
-
-
-/**
- * mtnic alloc ring
- *
- * Alloc and configure TX or RX ring
- *
- */
-static int
-mtnic_alloc_ring(struct mtnic_port *priv, struct mtnic_ring *ring,
- u32 size, u16 stride, u16 cq, u8 is_rx)
-{
- unsigned int i;
- int err;
- struct mtnic_rx_desc *rx_desc;
- struct mtnic_tx_desc *tx_desc;
-
- ring->size = size; /* Number of descriptors */
- ring->size_mask = size - 1;
- ring->stride = stride; /* Size of each entry */
- ring->cq = cq; /* CQ number associated with this ring */
- ring->cons = 0;
- ring->prod = 0;
-
- /* Alloc descriptors buffer */
- ring->buf_size = ring->size * ((is_rx) ? sizeof(struct mtnic_rx_desc) :
- sizeof(struct mtnic_tx_desc));
- err = mtnic_alloc_aligned(ring->buf_size, (void *)&ring->buf,
- &ring->dma, PAGE_SIZE);
- if (err) {
- DBG("Failed allocating descriptor ring sizeof %x\n",
- ring->buf_size);
- return -EADDRINUSE;
- }
- memset(ring->buf, 0, ring->buf_size);
-
- DBG("Allocated %s ring (addr:%p) - buf:%p size:%x"
- "buf_size:%x dma:%lx\n",
- is_rx ? "Rx" : "Tx", ring, ring->buf, ring->size,
- ring->buf_size, ring->dma);
-
-
- if (is_rx) { /* RX ring */
- /* Alloc doorbell */
- err = mtnic_alloc_aligned(sizeof(struct mtnic_cq_db_record),
- (void *)&ring->db, &ring->db_dma, 32);
- if (err) {
- DBG("Failed allocating Rx ring doorbell record\n");
- free_dma(ring->buf, ring->buf_size);
- return -EADDRINUSE;
- }
-
- /* ==- Configure Descriptor -== */
- /* Init ctrl seg of rx desc */
- for (i = 0; i < UNITS_BUFFER_SIZE; ++i) {
- rx_desc = ring->buf +
- (sizeof(struct mtnic_rx_desc) * i);
- /* Pre-link descriptor */
- rx_desc->next = cpu_to_be16(i + 1);
- }
- /*The last ctrl descriptor is '0' and points to the first one*/
-
- /* Alloc IO_BUFFERS */
- err = mtnic_alloc_iobuf ( priv, ring, DEF_IOBUF_SIZE );
- if (err) {
- DBG("ERROR Allocating io buffer\n");
- free_dma(ring->buf, ring->buf_size);
- return -EADDRINUSE;
- }
-
- } else { /* TX ring */
- /* Set initial ownership of all Tx Desc' to SW (1) */
- for (i = 0; i < ring->size; i++) {
- tx_desc = ring->buf + ring->stride * i;
- tx_desc->ctrl.op_own = cpu_to_be32(MTNIC_BIT_DESC_OWN);
- }
- /* DB */
- ring->db_offset = cpu_to_be32(
- ((u32) priv->mtnic->fw.tx_offset[priv->port]) << 8);
-
- /* Map Tx+CQ doorbells */
- DBG("Mapping TxCQ doorbell at offset:0x%x\n",
- priv->mtnic->fw.txcq_db_offset);
- ring->txcq_db = ioremap(mtnic_pci_dev.dev.bar[2] +
- priv->mtnic->fw.txcq_db_offset, PAGE_SIZE);
- if (!ring->txcq_db) {
- DBG("Couldn't map txcq doorbell, aborting...\n");
- free_dma(ring->buf, ring->buf_size);
- return -EADDRINUSE;
- }
- }
-
- return 0;
-}
-
-
-
-/**
- * mtnic alloc CQ
- *
- * Alloc and configure CQ.
- *
- */
-static int
-mtnic_alloc_cq(struct net_device *dev, int num, struct mtnic_cq *cq,
- u8 is_rx, u32 size, u32 offset_ind)
-{
- int err ;
- unsigned int i;
-
- cq->num = num;
- cq->dev = dev;
- cq->size = size;
- cq->last = 0;
- cq->is_rx = is_rx;
- cq->offset_ind = offset_ind;
-
- /* Alloc doorbell */
- err = mtnic_alloc_aligned(sizeof(struct mtnic_cq_db_record),
- (void *)&cq->db, &cq->db_dma, 32);
- if (err) {
- DBG("Failed allocating CQ doorbell record\n");
- return -EADDRINUSE;
- }
- memset(cq->db, 0, sizeof(struct mtnic_cq_db_record));
-
- /* Alloc CQEs buffer */
- cq->buf_size = size * sizeof(struct mtnic_cqe);
- err = mtnic_alloc_aligned(cq->buf_size,
- (void *)&cq->buf, &cq->dma, PAGE_SIZE);
- if (err) {
- DBG("Failed allocating CQ buffer\n");
- free_dma(cq->db, sizeof(struct mtnic_cq_db_record));
- return -EADDRINUSE;
- }
- memset(cq->buf, 0, cq->buf_size);
- DBG("Allocated CQ (addr:%p) - size:%x buf:%p buf_size:%x "
- "dma:%lx db:%p db_dma:%lx\n"
- "cqn offset:%x \n", cq, cq->size, cq->buf,
- cq->buf_size, cq->dma, cq->db,
- cq->db_dma, offset_ind);
-
-
- /* Set ownership of all CQEs to HW */
- DBG("Setting HW ownership for CQ:%d\n", num);
- for (i = 0; i < cq->size; i++) {
- /* Initial HW ownership is 1 */
- cq->buf[i].op_tr_own = MTNIC_BIT_CQ_OWN;
- }
- return 0;
-}
-
-
-
-/**
- * mtnic_alloc_resources
- *
- * Alloc and configure CQs, Tx, Rx
- */
-unsigned int
-mtnic_alloc_resources(struct net_device *dev)
-{
- struct mtnic_port *priv = netdev_priv(dev);
- int err;
- int cq_ind = 0;
- int cq_offset = priv->mtnic->fw.cq_offset;
-
- /* Alloc 1st CQ */
- err = mtnic_alloc_cq(dev, cq_ind, &priv->cq[cq_ind], 1 /* RX */,
- UNITS_BUFFER_SIZE, cq_offset + cq_ind);
- if (err) {
- DBG("Failed allocating Rx CQ\n");
- return -EADDRINUSE;
- }
-
-
- /* Alloc RX */
- err = mtnic_alloc_ring(priv, &priv->rx_ring, UNITS_BUFFER_SIZE,
- sizeof(struct mtnic_rx_desc), cq_ind, /* RX */1);
- if (err) {
- DBG("Failed allocating Rx Ring\n");
- goto cq0_error;
- }
-
-
- ++cq_ind;
-
- /* alloc 2nd CQ */
- err = mtnic_alloc_cq(dev, cq_ind, &priv->cq[cq_ind], 0 /* TX */,
- UNITS_BUFFER_SIZE, cq_offset + cq_ind);
- if (err) {
- DBG("Failed allocating Tx CQ\n");
- goto rx_error;
- }
-
- /* Alloc TX */
- err = mtnic_alloc_ring(priv, &priv->tx_ring, UNITS_BUFFER_SIZE,
- sizeof(struct mtnic_tx_desc), cq_ind, /* TX */ 0);
- if (err) {
- DBG("Failed allocating Tx ring\n");
- goto cq1_error;
- }
-
- return 0;
-
-cq1_error:
- free_dma(priv->cq[1].buf, priv->cq[1].buf_size);
- free_dma(priv->cq[1].db, sizeof(struct mtnic_cq_db_record));
-
-rx_error:
- free_dma(priv->rx_ring.buf, priv->rx_ring.buf_size);
- free_dma(priv->rx_ring.db, sizeof(struct mtnic_cq_db_record));
- mtnic_free_io_buffers(&priv->rx_ring);
-cq0_error:
- free_dma(priv->cq[0].buf, priv->cq[0].buf_size);
- free_dma(priv->cq[0].db, sizeof(struct mtnic_cq_db_record));
-
- return -EADDRINUSE;
-}
-
-
-/**
- * mtnic alloc_eq
- *
- * Note: EQ is not used by the driver but must be allocated
- */
-static int
-mtnic_alloc_eq(struct mtnic *mtnic)
-{
- int err;
- unsigned int i;
- struct mtnic_eqe *eqe_desc = NULL;
-
- /* Allocating doorbell */
- mtnic->eq_db = ioremap(mtnic_pci_dev.dev.bar[2] +
- mtnic->fw.eq_db_offset, sizeof(u32));
- if (!mtnic->eq_db) {
- DBG("Couldn't map EQ doorbell, aborting...\n");
- return -EADDRINUSE;
- }
-
- /* Allocating buffer */
- mtnic->eq.size = NUM_EQES;
- mtnic->eq.buf_size = mtnic->eq.size * sizeof(struct mtnic_eqe);
- err = mtnic_alloc_aligned(mtnic->eq.buf_size, (void *)&mtnic->eq.buf,
- &mtnic->eq.dma, PAGE_SIZE);
- if (err) {
- DBG("Failed allocating EQ buffer\n");
- iounmap(mtnic->eq_db);
- return -EADDRINUSE;
- }
- memset(mtnic->eq.buf, 0, mtnic->eq.buf_size);
-
- for (i = 0; i < mtnic->eq.size; i++)
- eqe_desc = mtnic->eq.buf + (sizeof(struct mtnic_eqe) * i);
- eqe_desc->own |= MTNIC_BIT_EQE_OWN;
-
- mdelay(20);
- return 0;
-}
-
-
-
-
-
-
-
-
-
-
-
-/********************************************************************
-*
-* Mtnic commands functions
-* -=-=-=-=-=-=-=-=-=-=-=-=
-*
-*
-*
-*********************************************************************/
-static inline int
-cmdif_go_bit(struct mtnic *mtnic)
-{
- struct mtnic_if_cmd_reg *hcr = mtnic->hcr;
- u32 status;
- int i;
-
- for (i = 0; i < TBIT_RETRIES; i++) {
- status = be32_to_cpu(readl(&hcr->status_go_opcode));
- if ((status & MTNIC_BC_MASK(MTNIC_MASK_CMD_REG_T_BIT)) ==
- (mtnic->cmd.tbit << MTNIC_BC_OFF(MTNIC_MASK_CMD_REG_T_BIT))) {
- /* Read expected t-bit - now return go-bit value */
- return status & MTNIC_BC_MASK(MTNIC_MASK_CMD_REG_GO_BIT);
- }
- }
-
- DBG("Invalid tbit after %d retries!\n", TBIT_RETRIES);
- return -EBUSY; /* Return busy... */
-}
-
-/* Base Command interface */
-static int
-mtnic_cmd(struct mtnic *mtnic, void *in_imm,
- void *out_imm, u32 in_modifier, u16 op)
-{
-
- struct mtnic_if_cmd_reg *hcr = mtnic->hcr;
- int err = 0;
- u32 out_param_h = 0;
- u32 out_param_l = 0;
- u32 in_param_h = 0;
- u32 in_param_l = 0;
-
-
- static u16 token = 0x8000;
- u32 status;
- unsigned int timeout = 0;
-
- token++;
-
- if ( cmdif_go_bit ( mtnic ) ) {
- DBG("GO BIT BUSY:%p.\n", hcr + 6);
- err = -EBUSY;
- goto out;
- }
- if (in_imm) {
- in_param_h = *((u32*)in_imm);
- in_param_l = *((u32*)in_imm + 1);
- } else {
- in_param_l = cpu_to_be32(mtnic->cmd.mapping);
- }
- out_param_l = cpu_to_be32(mtnic->cmd.mapping);
-
- /* writing to MCR */
- writel(in_param_h, &hcr->in_param_h);
- writel(in_param_l, &hcr->in_param_l);
- writel((u32) cpu_to_be32(in_modifier), &hcr->input_modifier);
- writel(out_param_h, &hcr->out_param_h);
- writel(out_param_l, &hcr->out_param_l);
- writel((u32)cpu_to_be32(token << 16), &hcr->token);
- wmb();
-
- /* flip toggle bit before each write to the HCR */
- mtnic->cmd.tbit = !mtnic->cmd.tbit;
- writel( ( u32 )
- cpu_to_be32(MTNIC_BC_MASK(MTNIC_MASK_CMD_REG_GO_BIT) |
- ( mtnic->cmd.tbit << MTNIC_BC_OFF ( MTNIC_MASK_CMD_REG_T_BIT ) ) | op ),
- &hcr->status_go_opcode);
-
- while ( cmdif_go_bit ( mtnic ) && ( timeout <= GO_BIT_TIMEOUT ) ) {
- mdelay ( 1 );
- ++timeout;
- }
-
- if ( cmdif_go_bit ( mtnic ) ) {
- DBG("Command opcode:0x%x token:0x%x TIMEOUT.\n", op, token);
- err = -EBUSY;
- goto out;
- }
-
- if (out_imm) {
- *((u32 *)out_imm) = readl(&hcr->out_param_h);
- *((u32 *)out_imm + 1) = readl(&hcr->out_param_l);
- }
-
- status = be32_to_cpu((u32)readl(&hcr->status_go_opcode)) >> 24;
-
- if (status) {
- DBG("Command opcode:0x%x token:0x%x returned:0x%x\n",
- op, token, status);
- return status;
- }
-
-out:
- return err;
-}
-
-/* MAP PAGES wrapper */
-static int
-mtnic_map_cmd(struct mtnic *mtnic, u16 op, struct mtnic_pages pages)
-{
- unsigned int j;
- u32 addr;
- unsigned int len;
- u32 *page_arr = mtnic->cmd.buf;
- int nent = 0;
- int err = 0;
-
- memset(page_arr, 0, PAGE_SIZE);
-
- len = PAGE_SIZE * pages.num;
- pages.buf = (u32 *)umalloc(PAGE_SIZE * (pages.num + 1));
- addr = PAGE_SIZE + ((virt_to_bus(pages.buf) & 0xfffff000) + PAGE_SIZE);
- DBG("Mapping pages: size: %x address: %p\n", pages.num, pages.buf);
-
- if (addr & (PAGE_MASK)) {
- DBG("Got FW area not aligned to %d (%llx/%x)\n",
- PAGE_SIZE, (u64) addr, len);
- return -EADDRINUSE;
- }
-
- /* Function maps each PAGE seperately */
- for (j = 0; j < len; j+= PAGE_SIZE) {
- page_arr[nent * 4 + 3] = cpu_to_be32(addr + j);
- if (++nent == MTNIC_MAILBOX_SIZE / 16) {
- err = mtnic_cmd(mtnic, NULL, NULL, nent, op);
- if (err)
- return -EIO;
- nent = 0;
- }
- }
-
- if (nent) {
- err = mtnic_cmd(mtnic, NULL, NULL, nent, op);
- }
- return err;
-}
-
-
-
-/*
- * Query FW
- */
-static int
-mtnic_QUERY_FW ( struct mtnic *mtnic )
-{
- int err;
- struct mtnic_if_query_fw_out_mbox *cmd = mtnic->cmd.buf;
-
- err = mtnic_cmd(mtnic, NULL, NULL, 0, MTNIC_IF_CMD_QUERY_FW);
- if (err)
- return -EIO;
-
- /* Get FW and interface versions */
- mtnic->fw_ver = ((u64) be16_to_cpu(cmd->rev_maj) << 32) |
- ((u64) be16_to_cpu(cmd->rev_min) << 16) |
- (u64) be16_to_cpu(cmd->rev_smin);
- mtnic->fw.ifc_rev = be16_to_cpu(cmd->ifc_rev);
-
- /* Get offset for internal error reports (debug) */
- mtnic->fw.err_buf.offset = be64_to_cpu(cmd->err_buf_start);
- mtnic->fw.err_buf.size = be32_to_cpu(cmd->err_buf_size);
-
- DBG("Error buf offset is %llx\n", mtnic->fw.err_buf.offset);
-
- /* Get number of required FW (4k) pages */
- mtnic->fw.fw_pages.num = be16_to_cpu(cmd->fw_pages);
-
- return 0;
-}
-
-
-static int
-mtnic_OPEN_NIC(struct mtnic *mtnic)
-{
- struct mtnic_if_open_nic_in_mbox *open_nic = mtnic->cmd.buf;
- u32 extra_pages[2] = {0};
- int err;
-
- memset(open_nic, 0, sizeof *open_nic);
-
- /* port 1 */
- open_nic->log_rx_p1 = 0;
- open_nic->log_cq_p1 = 1;
-
- open_nic->log_tx_p1 = 0;
- open_nic->steer_p1 = MTNIC_IF_STEER_RSS;
- /* MAC + VLAN - leave reserved */
-
- /* port 2 */
- open_nic->log_rx_p2 = 0;
- open_nic->log_cq_p2 = 1;
-
- open_nic->log_tx_p2 = 0;
- open_nic->steer_p2 = MTNIC_IF_STEER_RSS;
- /* MAC + VLAN - leave reserved */
-
- err = mtnic_cmd(mtnic, NULL, extra_pages, 0, MTNIC_IF_CMD_OPEN_NIC);
-
- mtnic->fw.extra_pages.num = be32_to_cpu(*(extra_pages+1));
- DBG("Extra pages num is %x\n", mtnic->fw.extra_pages.num);
- return err;
-}
-
-static int
-mtnic_CONFIG_RX(struct mtnic *mtnic)
-{
- struct mtnic_if_config_rx_in_imm config_rx;
-
- memset(&config_rx, 0, sizeof config_rx);
- return mtnic_cmd(mtnic, &config_rx, NULL, 0, MTNIC_IF_CMD_CONFIG_RX);
-}
-
-static int
-mtnic_CONFIG_TX(struct mtnic *mtnic)
-{
- struct mtnic_if_config_send_in_imm config_tx;
-
- config_tx.enph_gpf = 0;
- return mtnic_cmd(mtnic, &config_tx, NULL, 0, MTNIC_IF_CMD_CONFIG_TX);
-}
-
-static int
-mtnic_HEART_BEAT(struct mtnic_port *priv, u32 *link_state)
-{
- struct mtnic_if_heart_beat_out_imm heart_beat;
-
- int err;
- u32 flags;
- err = mtnic_cmd(priv->mtnic, NULL, &heart_beat, 0, MTNIC_IF_CMD_HEART_BEAT);
- if (!err) {
- flags = be32_to_cpu(heart_beat.flags);
- if (flags & MTNIC_BC_MASK(MTNIC_MASK_HEAR_BEAT_INT_ERROR)) {
- DBG("Internal error detected\n");
- return -EIO;
- }
- *link_state = flags &
- ~((u32) MTNIC_BC_MASK(MTNIC_MASK_HEAR_BEAT_INT_ERROR));
- }
- return err;
-}
-
-
-/*
- * Port commands
- */
-
-static int
-mtnic_SET_PORT_DEFAULT_RING(struct mtnic_port *priv, u8 port, u16 ring)
-{
- struct mtnic_if_set_port_default_ring_in_imm def_ring;
-
- memset(&def_ring, 0, sizeof(def_ring));
- def_ring.ring = ring;
- return mtnic_cmd(priv->mtnic, &def_ring, NULL, port + 1,
- MTNIC_IF_CMD_SET_PORT_DEFAULT_RING);
-}
-
-static int
-mtnic_CONFIG_PORT_RSS_STEER(struct mtnic_port *priv, int port)
-{
- memset(priv->mtnic->cmd.buf, 0, PAGE_SIZE);
- return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
- MTNIC_IF_CMD_CONFIG_PORT_RSS_STEER);
-}
-
-static int
-mtnic_SET_PORT_RSS_INDIRECTION(struct mtnic_port *priv, int port)
-
-{
- memset(priv->mtnic->cmd.buf, 0, PAGE_SIZE);
- return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
- MTNIC_IF_CMD_SET_PORT_RSS_INDIRECTION);
-}
-
-
-/*
- * Config commands
- */
-static int
-mtnic_CONFIG_CQ(struct mtnic_port *priv, int port,
- u16 cq_ind, struct mtnic_cq *cq)
-{
- struct mtnic_if_config_cq_in_mbox *config_cq = priv->mtnic->cmd.buf;
-
- memset(config_cq, 0, sizeof *config_cq);
- config_cq->cq = cq_ind;
- config_cq->size = fls(UNITS_BUFFER_SIZE - 1);
- config_cq->offset = ((cq->dma) & (PAGE_MASK)) >> 6;
- config_cq->db_record_addr_l = cpu_to_be32(cq->db_dma);
- config_cq->page_address[1] = cpu_to_be32(cq->dma);
- DBG("config cq address: %x dma_address: %lx"
- "offset: %d size %d index: %d\n"
- , config_cq->page_address[1],cq->dma,
- config_cq->offset, config_cq->size, config_cq->cq );
-
- return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
- MTNIC_IF_CMD_CONFIG_CQ);
-}
-
-
-static int
-mtnic_CONFIG_TX_RING(struct mtnic_port *priv, u8 port,
- u16 ring_ind, struct mtnic_ring *ring)
-{
- struct mtnic_if_config_send_ring_in_mbox *config_tx_ring = priv->mtnic->cmd.buf;
- memset(config_tx_ring, 0, sizeof *config_tx_ring);
- config_tx_ring->ring = cpu_to_be16(ring_ind);
- config_tx_ring->size = fls(UNITS_BUFFER_SIZE - 1);
- config_tx_ring->cq = cpu_to_be16(ring->cq);
- config_tx_ring->page_address[1] = cpu_to_be32(ring->dma);
-
- return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
- MTNIC_IF_CMD_CONFIG_TX_RING);
-}
-
-static int
-mtnic_CONFIG_RX_RING(struct mtnic_port *priv, u8 port,
- u16 ring_ind, struct mtnic_ring *ring)
-{
- struct mtnic_if_config_rx_ring_in_mbox *config_rx_ring = priv->mtnic->cmd.buf;
- memset(config_rx_ring, 0, sizeof *config_rx_ring);
- config_rx_ring->ring = ring_ind;
- MTNIC_BC_PUT(config_rx_ring->stride_size, fls(UNITS_BUFFER_SIZE - 1),
- MTNIC_MASK_CONFIG_RX_RING_SIZE);
- MTNIC_BC_PUT(config_rx_ring->stride_size, 1,
- MTNIC_MASK_CONFIG_RX_RING_STRIDE);
- config_rx_ring->cq = cpu_to_be16(ring->cq);
- config_rx_ring->db_record_addr_l = cpu_to_be32(ring->db_dma);
-
- DBG("Config RX ring starting at address:%lx\n", ring->dma);
-
- config_rx_ring->page_address[1] = cpu_to_be32(ring->dma);
-
- return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
- MTNIC_IF_CMD_CONFIG_RX_RING);
-}
-
-static int
-mtnic_CONFIG_EQ(struct mtnic *mtnic)
-{
- struct mtnic_if_config_eq_in_mbox *eq = mtnic->cmd.buf;
-
- if (mtnic->eq.dma & (PAGE_MASK)) {
- DBG("misalligned eq buffer:%lx\n",
- mtnic->eq.dma);
- return -EADDRINUSE;
- }
-
- memset(eq, 0, sizeof *eq);
- MTNIC_BC_PUT(eq->offset, mtnic->eq.dma >> 6, MTNIC_MASK_CONFIG_EQ_OFFSET);
- MTNIC_BC_PUT(eq->size, fls(mtnic->eq.size - 1) - 1, MTNIC_MASK_CONFIG_EQ_SIZE);
- MTNIC_BC_PUT(eq->int_vector, 0, MTNIC_MASK_CONFIG_EQ_INT_VEC);
- eq->page_address[1] = cpu_to_be32(mtnic->eq.dma);
-
- return mtnic_cmd(mtnic, NULL, NULL, 0, MTNIC_IF_CMD_CONFIG_EQ);
-}
-
-
-
-
-static int
-mtnic_SET_RX_RING_ADDR(struct mtnic_port *priv, u8 port, u64* mac)
-{
- struct mtnic_if_set_rx_ring_addr_in_imm ring_addr;
- u32 modifier = ((u32) port + 1) << 16;
-
- memset(&ring_addr, 0, sizeof(ring_addr));
-
- ring_addr.mac_31_0 = cpu_to_be32(*mac & 0xffffffff);
- ring_addr.mac_47_32 = cpu_to_be16((*mac >> 32) & 0xffff);
- ring_addr.flags_vlan_id |= cpu_to_be16(
- MTNIC_BC_MASK(MTNIC_MASK_SET_RX_RING_ADDR_BY_MAC));
-
- return mtnic_cmd(priv->mtnic, &ring_addr, NULL, modifier, MTNIC_IF_CMD_SET_RX_RING_ADDR);
-}
-
-static int
-mtnic_SET_PORT_STATE(struct mtnic_port *priv, u8 port, u8 state)
-{
- struct mtnic_if_set_port_state_in_imm port_state;
-
- port_state.state = state ? cpu_to_be32(
- MTNIC_BC_MASK(MTNIC_MASK_CONFIG_PORT_STATE)) : 0;
- port_state.reserved = 0;
- return mtnic_cmd(priv->mtnic, &port_state, NULL, port + 1,
- MTNIC_IF_CMD_SET_PORT_STATE);
-}
-
-static int
-mtnic_SET_PORT_MTU(struct mtnic_port *priv, u8 port, u16 mtu)
-{
- struct mtnic_if_set_port_mtu_in_imm set_mtu;
-
- memset(&set_mtu, 0, sizeof(set_mtu));
- set_mtu.mtu = cpu_to_be16(mtu);
- return mtnic_cmd(priv->mtnic, &set_mtu, NULL, port + 1,
- MTNIC_IF_CMD_SET_PORT_MTU);
-}
-
-/*
-static int
-mtnic_CONFIG_PORT_VLAN_FILTER(struct mtnic_port *priv, int port)
-{
- struct mtnic_if_config_port_vlan_filter_in_mbox *vlan_filter = priv->mtnic->cmd.buf;
-
- // When no vlans are configured we disable the filter
- // (i.e., pass all vlans) because we ignore them anyhow
- memset(vlan_filter, 0xff, sizeof(*vlan_filter));
- return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
- MTNIC_IF_CMD_CONFIG_PORT_VLAN_FILTER);
-}
-*/
-
-
-static int
-mtnic_RELEASE_RESOURCE(struct mtnic_port *priv, u8 port, u8 type, u8 index)
-{
- struct mtnic_if_release_resource_in_imm rel;
- memset(&rel, 0, sizeof rel);
- rel.index = index;
- rel.type = type;
- return mtnic_cmd ( priv->mtnic,
- &rel, NULL, ( type == MTNIC_IF_RESOURCE_TYPE_EQ ) ?
- 0 : port + 1, MTNIC_IF_CMD_RELEASE_RESOURCE );
-}
-
-
-static int
-mtnic_QUERY_CAP(struct mtnic *mtnic, u8 index, u8 mod, u64 *result)
-{
- struct mtnic_if_query_cap_in_imm cap;
- u32 out_imm[2];
- int err;
-
- memset(&cap, 0, sizeof cap);
- cap.cap_index = index;
- cap.cap_modifier = mod;
- err = mtnic_cmd(mtnic, &cap, &out_imm, 0, MTNIC_IF_CMD_QUERY_CAP);
-
- *((u32*)result) = be32_to_cpu(*(out_imm+1));
- *((u32*)result + 1) = be32_to_cpu(*out_imm);
-
- DBG("Called Query cap with index:0x%x mod:%d result:0x%llx"
- " error:%d\n", index, mod, *result, err);
- return err;
-}
-
-
-#define DO_QUERY_CAP(cap, mod, var) \
- err = mtnic_QUERY_CAP(mtnic, cap, mod, &result);\
- if (err) \
- return err; \
- (var) = result
-
-static int
-mtnic_query_num_ports(struct mtnic *mtnic)
-{
- int err = 0;
- u64 result;
-
- DO_QUERY_CAP(MTNIC_IF_CAP_NUM_PORTS, 0, mtnic->fw.num_ports);
-
- return 0;
-}
-
-static int
-mtnic_query_mac(struct mtnic *mtnic)
-{
- int err = 0;
- int i;
- u64 result;
-
- for (i = 0; i < mtnic->fw.num_ports; i++) {
- DO_QUERY_CAP(MTNIC_IF_CAP_DEFAULT_MAC, i + 1, mtnic->fw.mac[i]);
- }
-
- return 0;
-}
-
-static int
-mtnic_query_offsets(struct mtnic *mtnic)
-{
- int err;
- int i;
- u64 result;
-
- DO_QUERY_CAP(MTNIC_IF_CAP_MEM_KEY,
- MTNIC_IF_MEM_TYPE_SNOOP,
- mtnic->fw.mem_type_snoop_be);
- mtnic->fw.mem_type_snoop_be = cpu_to_be32(mtnic->fw.mem_type_snoop_be);
- DO_QUERY_CAP(MTNIC_IF_CAP_TX_CQ_DB_OFFSET, 0, mtnic->fw.txcq_db_offset);
- DO_QUERY_CAP(MTNIC_IF_CAP_EQ_DB_OFFSET, 0, mtnic->fw.eq_db_offset);
-
- for (i = 0; i < mtnic->fw.num_ports; i++) {
- DO_QUERY_CAP(MTNIC_IF_CAP_CQ_OFFSET, i + 1, mtnic->fw.cq_offset);
- DO_QUERY_CAP(MTNIC_IF_CAP_TX_OFFSET, i + 1, mtnic->fw.tx_offset[i]);
- DO_QUERY_CAP(MTNIC_IF_CAP_RX_OFFSET, i + 1, mtnic->fw.rx_offset[i]);
- DBG("--> Port %d CQ offset:0x%x\n", i, mtnic->fw.cq_offset);
- DBG("--> Port %d Tx offset:0x%x\n", i, mtnic->fw.tx_offset[i]);
- DBG("--> Port %d Rx offset:0x%x\n", i, mtnic->fw.rx_offset[i]);
- }
-
- mdelay(20);
- return 0;
-}
-
-
-
-
-
-
-
-
-
-
-
-/********************************************************************
-*
-* MTNIC initalization functions
-*
-*
-*
-*
-*********************************************************************/
-
-/**
- * Reset device
- */
-void
-mtnic_reset ( void )
-{
- void *reset = ioremap ( mtnic_pci_dev.dev.bar[0] + MTNIC_RESET_OFFSET,
- 4 );
- writel ( cpu_to_be32 ( 1 ), reset );
- iounmap ( reset );
-}
-
-
-/**
- * Restore PCI config
- */
-static int
-restore_config(void)
-{
- int i;
- int rc;
-
- for (i = 0; i < 64; ++i) {
- if (i != 22 && i != 23) {
- rc = pci_write_config_dword(mtnic_pci_dev.dev.dev,
- i << 2,
- mtnic_pci_dev.dev.
- dev_config_space[i]);
- if (rc)
- return rc;
- }
- }
- return 0;
-}
-
-
-
-/**
- * Init PCI configuration
- */
-static int
-mtnic_init_pci(struct pci_device *dev)
-{
- int i;
- int err;
-
- /* save bars */
- DBG("bus=%d devfn=0x%x\n", dev->bus, dev->devfn);
- for (i = 0; i < 6; ++i) {
- mtnic_pci_dev.dev.bar[i] =
- pci_bar_start(dev, PCI_BASE_ADDRESS_0 + (i << 2));
- DBG("bar[%d]= 0x%08lx \n", i, mtnic_pci_dev.dev.bar[i]);
- }
-
- /* save config space */
- for (i = 0; i < 64; ++i) {
- err = pci_read_config_dword(dev, i << 2,
- &mtnic_pci_dev.dev.
- dev_config_space[i]);
- if (err) {
- DBG("Can not save configuration space");
- return err;
- }
- }
-
- mtnic_pci_dev.dev.dev = dev;
-
- return 0;
-}
-
-/**
- * Initial hardware
- */
-static inline
-int mtnic_init_card(struct mtnic *mtnic)
-{
- int err = 0;
-
-
- /* Alloc command interface */
- err = mtnic_alloc_cmdif ( mtnic );
- if (err) {
- DBG("Failed to init command interface, aborting\n");
- return -EADDRINUSE;
- }
-
-
- /**
- * Bring up HW
- */
- err = mtnic_QUERY_FW ( mtnic );
- if (err) {
- DBG("QUERY_FW command failed, aborting\n");
- goto cmd_error;
- }
- DBG("Command interface revision:%d\n", mtnic->fw.ifc_rev);
-
- /* Allocate memory for FW and start it */
- err = mtnic_map_cmd(mtnic, MTNIC_IF_CMD_MAP_FW, mtnic->fw.fw_pages);
- if (err) {
- DBG("Eror In MAP_FW\n");
- if (mtnic->fw.fw_pages.buf)
- ufree((intptr_t)mtnic->fw.fw_pages.buf);
- goto cmd_error;
- }
-
- /* Run firmware */
- err = mtnic_cmd(mtnic, NULL, NULL, 0, MTNIC_IF_CMD_RUN_FW);
- if (err) {
- DBG("Eror In RUN FW\n");
- goto map_fw_error;
- }
-
- DBG("FW version:%d.%d.%d\n",
- (u16) (mtnic->fw_ver >> 32),
- (u16) ((mtnic->fw_ver >> 16) & 0xffff),
- (u16) (mtnic->fw_ver & 0xffff));
-
-
- /* Query num ports */
- err = mtnic_query_num_ports(mtnic);
- if (err) {
- DBG("Insufficient resources, aborting\n");
- goto map_fw_error;
- }
-
- /* Open NIC */
- err = mtnic_OPEN_NIC(mtnic);
- if (err) {
- DBG("Failed opening NIC, aborting\n");
- goto map_fw_error;
- }
-
- /* Allocate and map pages worksace */
- err = mtnic_map_cmd(mtnic, MTNIC_IF_CMD_MAP_PAGES, mtnic->fw.extra_pages);
- if (err) {
- DBG("Couldn't allocate %x FW extra pages, aborting\n",
- mtnic->fw.extra_pages.num);
- if (mtnic->fw.extra_pages.buf)
- ufree((intptr_t)mtnic->fw.extra_pages.buf);
- goto map_fw_error;
- }
-
-
- /* Get device information */
- err = mtnic_query_mac(mtnic);
- if (err) {
- DBG("Insufficient resources in quesry mac, aborting\n");
- goto map_fw_error;
- }
-
- /* Get device offsets */
- err = mtnic_query_offsets(mtnic);
- if (err) {
- DBG("Failed retrieving resource offests, aborting\n");
- ufree((intptr_t)mtnic->fw.extra_pages.buf);
- goto map_extra_error;
- }
-
-
- /* Alloc EQ */
- err = mtnic_alloc_eq(mtnic);
- if (err) {
- DBG("Failed init shared resources. error: %d\n", err);
- goto map_extra_error;
- }
-
- /* Configure HW */
- err = mtnic_CONFIG_EQ(mtnic);
- if (err) {
- DBG("Failed configuring EQ\n");
- goto eq_error;
- }
- err = mtnic_CONFIG_RX(mtnic);
- if (err) {
- DBG("Failed Rx configuration\n");
- goto eq_error;
- }
- err = mtnic_CONFIG_TX(mtnic);
- if (err) {
- DBG("Failed Tx configuration\n");
- goto eq_error;
- }
-
-
- return 0;
-
-
-eq_error:
- iounmap(mtnic->eq_db);
- free_dma(mtnic->eq.buf, mtnic->eq.buf_size);
-map_extra_error:
- ufree((intptr_t)mtnic->fw.extra_pages.buf);
-map_fw_error:
- ufree((intptr_t)mtnic->fw.fw_pages.buf);
-
-cmd_error:
- iounmap(mtnic->hcr);
- free_dma(mtnic->cmd.buf, PAGE_SIZE);
-
- return -EADDRINUSE;
-}
-
-
-
-
-
-
-
-
-
-
-/*******************************************************************
-*
-* Process functions
-*
-* process compliations of TX and RX
-*
-*
-********************************************************************/
-void mtnic_process_tx_cq(struct mtnic_port *priv, struct net_device *dev,
- struct mtnic_cq *cq)
-{
- struct mtnic_cqe *cqe = cq->buf;
- struct mtnic_ring *ring = &priv->tx_ring;
- u16 index;
-
-
- index = cq->last & (cq->size-1);
- cqe = &cq->buf[index];
-
- /* Owner bit changes every round */
- while (XNOR(cqe->op_tr_own & MTNIC_BIT_CQ_OWN, cq->last & cq->size)) {
- netdev_tx_complete (dev, ring->iobuf[index]);
- ++cq->last;
- index = cq->last & (cq->size-1);
- cqe = &cq->buf[index];
- }
-
- /* Update consumer index */
- cq->db->update_ci = cpu_to_be32(cq->last & 0xffffff);
- wmb(); /* ensure HW sees CQ consumer before we post new buffers */
- ring->cons = cq->last;
-}
-
-
-int mtnic_process_rx_cq(struct mtnic_port *priv,
- struct net_device *dev,
- struct mtnic_cq *cq)
-{
- struct mtnic_cqe *cqe;
- struct mtnic_ring *ring = &priv->rx_ring;
- int index;
- int err;
- struct io_buffer *rx_iob;
- unsigned int length;
-
-
- /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
- * descriptor offset can be deduced from the CQE index instead of
- * reading 'cqe->index' */
- index = cq->last & (cq->size-1);
- cqe = &cq->buf[index];
-
- /* Process all completed CQEs */
- while (XNOR(cqe->op_tr_own & MTNIC_BIT_CQ_OWN, cq->last & cq->size)) {
- /* Drop packet on bad receive or bad checksum */
- if ((cqe->op_tr_own & 0x1f) == MTNIC_OPCODE_ERROR) {
- DBG("CQE completed with error - vendor \n");
- free_iob(ring->iobuf[index]);
- goto next;
- }
- if (cqe->enc_bf & MTNIC_BIT_BAD_FCS) {
- DBG("Accepted packet with bad FCS\n");
- free_iob(ring->iobuf[index]);
- goto next;
- }
-
- /*
- * Packet is OK - process it.
- */
- length = be32_to_cpu(cqe->byte_cnt);
- rx_iob = ring->iobuf[index];
- iob_put(rx_iob, length);
-
- /* Add this packet to the receive queue. */
- netdev_rx(dev, rx_iob);
- ring->iobuf[index] = NULL;
-
-next:
- ++cq->last;
- index = cq->last & (cq->size-1);
- cqe = &cq->buf[index];
-
-
-
- }
-
- /* Update consumer index */
- cq->db->update_ci = cpu_to_be32(cq->last & 0xffffff);
- wmb(); /* ensure HW sees CQ consumer before we post new buffers */
- ring->cons = cq->last;
-
- if (ring->prod - ring->cons < (MAX_GAP_PROD_CONS)) {
- err = mtnic_alloc_iobuf(priv, &priv->rx_ring, DEF_IOBUF_SIZE);
- if (err) {
- DBG("ERROR Allocating io buffer");
- return -EADDRINUSE;
- }
- }
-
- return 0;
-}
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-/********************************************************************
-*
-* net_device functions
-*
-*
-* open, poll, close, probe, disable, irq
-*
-*********************************************************************/
-static int
-mtnic_open(struct net_device *dev)
-{
- struct mtnic_port *priv = netdev_priv(dev);
-
- int err = 0;
- struct mtnic_ring *ring;
- struct mtnic_cq *cq;
- int cq_ind = 0;
- u32 dev_link_state;
- int link_check;
-
- DBG("starting port:%d, MAC Address: 0x%12llx\n",
- priv->port, priv->mtnic->fw.mac[priv->port]);
-
- /* Alloc and configure CQs, TX, RX */
- err = mtnic_alloc_resources ( dev );
- if (err) {
- DBG("Error allocating resources\n");
- return -EADDRINUSE;
- }
-
- /* Pass CQs configuration to HW */
- for (cq_ind = 0; cq_ind < NUM_CQS; ++cq_ind) {
- cq = &priv->cq[cq_ind];
- err = mtnic_CONFIG_CQ(priv, priv->port, cq_ind, cq);
- if (err) {
- DBG("Failed configuring CQ:%d error %d\n",
- cq_ind, err);
- if (cq_ind)
- goto cq_error;
- else
- goto allocation_error;
- }
- /* Update consumer index */
- cq->db->update_ci = cpu_to_be32(cq->last & 0xffffff);
- }
-
-
-
- /* Pass Tx configuration to HW */
- ring = &priv->tx_ring;
- err = mtnic_CONFIG_TX_RING(priv, priv->port, 0, ring);
- if (err) {
- DBG("Failed configuring Tx ring:0\n");
- goto cq_error;
- }
-
- /* Pass RX configuration to HW */
- ring = &priv->rx_ring;
- err = mtnic_CONFIG_RX_RING(priv, priv->port, 0, ring);
- if (err) {
- DBG("Failed configuring Rx ring:0\n");
- goto tx_error;
- }
-
- /* Configure Rx steering */
- err = mtnic_CONFIG_PORT_RSS_STEER(priv, priv->port);
- if (!err)
- err = mtnic_SET_PORT_RSS_INDIRECTION(priv, priv->port);
- if (err) {
- DBG("Failed configuring RSS steering\n");
- goto rx_error;
- }
-
-
- /* Set the port default ring to ring 0 */
- err = mtnic_SET_PORT_DEFAULT_RING(priv, priv->port, 0);
- if (err) {
- DBG("Failed setting default ring\n");
- goto rx_error;
- }
-
- /* Set Mac address */
- err = mtnic_SET_RX_RING_ADDR(priv, priv->port, &priv->mtnic->fw.mac[priv->port]);
- if (err) {
- DBG("Failed setting default MAC address\n");
- goto rx_error;
- }
-
- /* Set MTU */
- err = mtnic_SET_PORT_MTU(priv, priv->port, DEF_MTU);
- if (err) {
- DBG("Failed setting MTU\n");
- goto rx_error;
- }
-
- /* Configure VLAN filter */
- /* By adding this function, The second port won't accept packets
- err = mtnic_CONFIG_PORT_VLAN_FILTER(priv, priv->port);
- if (err) {
- DBG("Failed configuring VLAN filter\n");
- goto rx_error;
- }
- */
-
-
- /* Bring up physical link */
- err = mtnic_SET_PORT_STATE(priv, priv->port, 1);
- if (err) {
- DBG("Failed bringing up port\n");
- goto rx_error;
- }
-
- /* PORT IS UP */
- priv->state = CARD_UP;
-
-
- /* Checking Link is up */
- DBG ( "Checking if link is up\n" );
-
-
- for ( link_check = 0; link_check < CHECK_LINK_TIMES; link_check ++ ) {
- /* Let link state stabilize if cable was connected */
- mdelay ( DELAY_LINK_CHECK );
-
- err = mtnic_HEART_BEAT(priv, &dev_link_state);
- if (err) {
- DBG("Failed getting device link state\n");
- return -ENETDOWN;
- }
-
- if ( dev_link_state & priv->port ) {
- /* Link is up */
- break;
- }
- }
-
-
- if ( ! ( dev_link_state & 0x3 ) ) {
- DBG("Link down, check cables and restart\n");
- netdev_link_down ( dev );
- return -ENETDOWN;
- }
-
- DBG ( "Link is up!\n" );
-
- /* Mark as link up */
- netdev_link_up ( dev );
-
- return 0;
-
-rx_error:
- err = mtnic_RELEASE_RESOURCE(priv, priv->port,
- MTNIC_IF_RESOURCE_TYPE_RX_RING, 0);
-tx_error:
- err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
- MTNIC_IF_RESOURCE_TYPE_TX_RING, 0);
-
-cq_error:
- while (cq_ind) {
- err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
- MTNIC_IF_RESOURCE_TYPE_CQ, --cq_ind);
- }
- if (err)
- DBG("Eror Releasing resources\n");
-
-allocation_error:
-
- free_dma(priv->tx_ring.buf, priv->tx_ring.buf_size);
- iounmap(priv->tx_ring.txcq_db);
- free_dma(priv->cq[1].buf, priv->cq[1].buf_size);
- free_dma(priv->cq[1].db, sizeof(struct mtnic_cq_db_record));
- free_dma(priv->rx_ring.buf, priv->rx_ring.buf_size);
- free_dma(priv->rx_ring.db, sizeof(struct mtnic_cq_db_record));
- free_dma(priv->cq[0].buf, priv->cq[0].buf_size);
- free_dma(priv->cq[0].db, sizeof(struct mtnic_cq_db_record));
-
- mtnic_free_io_buffers(&priv->rx_ring);
-
- return -ENETDOWN;
-}
-
-
-
-
-/** Check if we got completion for receive and transmit and
- * check the line with heart_bit command */
-static void
-mtnic_poll ( struct net_device *dev )
-{
- struct mtnic_port *priv = netdev_priv(dev);
- struct mtnic_cq *cq;
- u32 dev_link_state;
- int err;
- unsigned int i;
-
- /* In case of an old error then return */
- if (priv->state != CARD_UP)
- return;
-
- /* We do not check the device every call _poll call,
- since it will slow it down */
- if ((priv->poll_counter % ROUND_TO_CHECK) == 0) {
- /* Check device */
- err = mtnic_HEART_BEAT(priv, &dev_link_state);
- if (err) {
- DBG("Device has internal error\n");
- priv->state = CARD_LINK_DOWN;
- return;
- }
- if (!(dev_link_state & 0x3)) {
- DBG("Link down, check cables and restart\n");
- priv->state = CARD_LINK_DOWN;
- return;
- }
- }
- /* Polling CQ */
- for (i = 0; i < NUM_CQS; i++) {
- cq = &priv->cq[i]; //Passing on the 2 cqs.
-
- if (cq->is_rx) {
- err = mtnic_process_rx_cq(priv, cq->dev, cq);
- if (err) {
- priv->state = CARD_LINK_DOWN;
- DBG(" Error allocating RX buffers\n");
- return;
- }
- } else {
- mtnic_process_tx_cq(priv, cq->dev, cq);
- }
- }
- ++ priv->poll_counter;
-}
-
-
-
-static int
-mtnic_transmit( struct net_device *dev, struct io_buffer *iobuf )
-{
-
- struct mtnic_port *priv = netdev_priv(dev);
- struct mtnic_ring *ring;
- struct mtnic_tx_desc *tx_desc;
- struct mtnic_data_seg *data;
- u32 index;
-
- /* In case of an error then return */
- if (priv->state != CARD_UP)
- return -ENETDOWN;
-
- ring = &priv->tx_ring;
-
- index = ring->prod & ring->size_mask;
- if ((ring->prod - ring->cons) >= ring->size) {
- DBG("No space left for descriptors!!! cons: %x prod: %x\n",
- ring->cons, ring->prod);
- mdelay(5);
- return -EAGAIN;/* no space left */
- }
-
- /* get current descriptor */
- tx_desc = ring->buf + (index * sizeof(struct mtnic_tx_desc));
-
- /* Prepare Data Seg */
- data = &tx_desc->data;
- data->addr_l = cpu_to_be32((u32)virt_to_bus(iobuf->data));
- data->count = cpu_to_be32(iob_len(iobuf));
- data->mem_type = priv->mtnic->fw.mem_type_snoop_be;
-
- /* Prepare ctrl segement */
- tx_desc->ctrl.size_vlan = cpu_to_be32(2);
- tx_desc->ctrl.flags = cpu_to_be32(MTNIC_BIT_TX_COMP |
- MTNIC_BIT_NO_ICRC);
- tx_desc->ctrl.op_own = cpu_to_be32(MTNIC_OPCODE_SEND) |
- ((ring->prod & ring->size) ?
- cpu_to_be32(MTNIC_BIT_DESC_OWN) : 0);
-
- /* Attach io_buffer */
- ring->iobuf[index] = iobuf;
-
- /* Update producer index */
- ++ring->prod;
-
- /* Ring doorbell! */
- wmb();
- writel((u32) ring->db_offset, &ring->txcq_db->send_db);
-
- return 0;
-}
-
-
-static void
-mtnic_close(struct net_device *dev)
-{
- struct mtnic_port *priv = netdev_priv(dev);
- int err = 0;
- DBG("Close called for port:%d\n", priv->port);
-
- if ( ( priv->state == CARD_UP ) ||
- ( priv->state == CARD_LINK_DOWN ) ) {
-
- /* Disable port */
- err |= mtnic_SET_PORT_STATE(priv, priv->port, 0);
- /*
- * Stop HW associated with this port
- */
- mdelay(5);
-
- /* Stop RX */
- err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
- MTNIC_IF_RESOURCE_TYPE_RX_RING, 0);
-
- /* Stop TX */
- err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
- MTNIC_IF_RESOURCE_TYPE_TX_RING, 0);
-
- /* Stop CQs */
- err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
- MTNIC_IF_RESOURCE_TYPE_CQ, 0);
- err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
- MTNIC_IF_RESOURCE_TYPE_CQ, 1);
- if (err) {
- DBG("Close reported error %d\n", err);
- }
-
- mdelay ( 10 );
-
- /* free memory */
- free_dma(priv->tx_ring.buf, priv->tx_ring.buf_size);
- iounmap(priv->tx_ring.txcq_db);
- free_dma(priv->cq[1].buf, priv->cq[1].buf_size);
- free_dma(priv->cq[1].db, sizeof(struct mtnic_cq_db_record));
- free_dma(priv->rx_ring.buf, priv->rx_ring.buf_size);
- free_dma(priv->rx_ring.db, sizeof(struct mtnic_cq_db_record));
- free_dma(priv->cq[0].buf, priv->cq[0].buf_size);
- free_dma(priv->cq[0].db, sizeof(struct mtnic_cq_db_record));
-
- /* Free RX buffers */
- mtnic_free_io_buffers(&priv->rx_ring);
-
-
-
- }
-
- priv->state = CARD_INITIALIZED;
-
-}
-
-
-static void
-mtnic_disable(struct pci_device *pci)
-{
-
- int err;
- int i;
- struct mtnic *mtnic = pci_get_drvdata(pci);
-
-
- struct net_device *dev;
- struct mtnic_port *priv;
-
- for ( i = ( mtnic->fw.num_ports - 1 ); i >= 0; i-- ) {
-
- dev = mtnic->netdev[i];
-
- priv = netdev_priv(dev);
-
- /* Just in case */
- if ( ( priv->state == CARD_UP ) ||
- ( priv->state == CARD_LINK_DOWN ) )
- mtnic_close ( dev );
- }
-
- /* Releasing EQ */
- priv = netdev_priv ( mtnic->netdev[0] );
- err = mtnic_RELEASE_RESOURCE(priv, 1,
- MTNIC_IF_RESOURCE_TYPE_EQ, 0);
-
- DBG("Calling MTNIC_CLOSE command\n");
- err |= mtnic_cmd(mtnic, NULL, NULL, 0,
- MTNIC_IF_CMD_CLOSE_NIC);
- if (err) {
- DBG("Error Releasing resources %d\n", err);
- }
-
- free_dma(mtnic->cmd.buf, PAGE_SIZE);
- iounmap(mtnic->hcr);
- ufree((intptr_t)mtnic->fw.fw_pages.buf);
- ufree((intptr_t)mtnic->fw.extra_pages.buf);
- free_dma(mtnic->eq.buf, mtnic->eq.buf_size);
- iounmap(mtnic->eq_db);
-
-
- for ( i = ( mtnic->fw.num_ports - 1 ); i >= 0; i-- ) {
- dev = mtnic->netdev[i];
- unregister_netdev ( dev );
- netdev_nullify ( dev );
- netdev_put ( dev );
- }
-
- free ( mtnic );
-
-
- mtnic_reset ();
- mdelay ( 1000 );
- /* Restore config, if we would like to retry booting */
- restore_config ();
-
-
-}
-
-
-
-static void
-mtnic_irq(struct net_device *netdev __unused, int enable __unused)
-{
- /* Not implemented */
-}
-
-
-
-/** mtnic net device operations */
-static struct net_device_operations mtnic_operations = {
- .open = mtnic_open,
- .close = mtnic_close,
- .transmit = mtnic_transmit,
- .poll = mtnic_poll,
- .irq = mtnic_irq,
-};
-
-
-
-
-
-
-
-static int
-mtnic_probe(struct pci_device *pci,
- const struct pci_device_id *id __unused)
-{
- struct mtnic_port *priv;
- struct mtnic *mtnic;
- int err;
- u64 mac;
- int port_index;
-
-
- adjust_pci_device(pci);
-
- err = mtnic_init_pci(pci);
- if (err) {
- DBG("Error in pci_init\n");
- return -EIO;
- }
-
- mtnic_reset();
- mdelay(1000);
-
- err = restore_config();
- if (err) {
- DBG("Error in restoring config\n");
- return err;
- }
-
- mtnic = zalloc ( sizeof ( *mtnic ) );
- if ( ! mtnic ) {
- DBG ( "Error Allocating mtnic buffer\n" );
- return -EADDRINUSE;
- }
-
- pci_set_drvdata(pci, mtnic);
-
- mtnic->pdev = pci;
-
-
- /* Initialize hardware */
- err = mtnic_init_card ( mtnic );
- if (err) {
- DBG("Error in init_card\n");
- goto err_init_card;
- }
-
- for ( port_index = 0; port_index < mtnic->fw.num_ports; port_index ++ ) {
- /* Initializing net device */
- mtnic->netdev[port_index] = alloc_etherdev( sizeof ( struct mtnic_port ) );
- if ( mtnic->netdev[port_index] == NULL ) {
- DBG("Net device allocation failed\n");
- goto err_alloc_mtnic;
- }
-
- /*
- * Initialize driver private data
- */
-
- mtnic->netdev[port_index]->dev = &pci->dev;
- priv = netdev_priv ( mtnic->netdev[port_index] );
- memset ( priv, 0, sizeof ( struct mtnic_port ) );
- priv->mtnic = mtnic;
- priv->netdev = mtnic->netdev[port_index];
-
- /* Attach pci device */
- netdev_init(mtnic->netdev[port_index], &mtnic_operations);
-
- /* Set port number */
- priv->port = port_index;
-
- /* Set state */
- priv->state = CARD_DOWN;
- }
-
-
- int mac_idx;
- for ( port_index = 0; port_index < mtnic->fw.num_ports; port_index ++ ) {
- priv = netdev_priv ( mtnic->netdev[port_index] );
- /* Program the MAC address */
- mac = priv->mtnic->fw.mac[port_index];
- for (mac_idx = 0; mac_idx < MAC_ADDRESS_SIZE; ++mac_idx) {
- mtnic->netdev[port_index]->hw_addr[MAC_ADDRESS_SIZE - mac_idx - 1] = mac & 0xFF;
- mac = mac >> 8;
- }
-
- if ( register_netdev ( mtnic->netdev[port_index] ) ) {
- DBG("Netdev registration failed\n");
- priv->state = CARD_INITIALIZED;
- goto err_alloc_mtnic;
- }
- }
-
-
- return 0;
-
-err_alloc_mtnic:
- free ( mtnic );
-err_init_card:
- return -EIO;
-}
-
-
-
-
-static struct pci_device_id mtnic_nics[] = {
- PCI_ROM ( 0x15b3, 0x6368, "mt25448", "Mellanox ConnectX EN driver", 0 ),
- PCI_ROM ( 0x15b3, 0x6372, "mt25458", "Mellanox ConnectX ENt driver", 0 ),
- PCI_ROM ( 0x15b3, 0x6750, "mt26448", "Mellanox ConnectX EN GEN2 driver", 0 ),
- PCI_ROM ( 0x15b3, 0x675a, "mt26458", "Mellanox ConnectX ENt GEN2 driver", 0 ),
-};
-
-struct pci_driver mtnic_driver __pci_driver = {
- .ids = mtnic_nics,
- .id_count = sizeof(mtnic_nics) / sizeof(mtnic_nics[0]),
- .probe = mtnic_probe,
- .remove = mtnic_disable,
-};
-
diff --git a/src/drivers/net/mtnic.h b/src/drivers/net/mtnic.h
deleted file mode 100644
index aa240e22..00000000
--- a/src/drivers/net/mtnic.h
+++ /dev/null
@@ -1,722 +0,0 @@
-/*
- * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-FILE_LICENCE ( GPL2_ONLY );
-
-#ifndef H_MTNIC_IF_DEFS_H
-#define H_MTNIC_IF_DEFS_H
-
-
-
-/*
-* Device setup
-*/
-#define MTNIC_MAX_PORTS 2
-#define MTNIC_PORT1 0
-#define MTNIC_PORT2 1
-#define NUM_TX_RINGS 1
-#define NUM_RX_RINGS 1
-#define NUM_CQS (NUM_RX_RINGS + NUM_TX_RINGS)
-#define GO_BIT_TIMEOUT 6000
-#define TBIT_RETRIES 100
-#define UNITS_BUFFER_SIZE 8 /* can be configured to 4/8/16 */
-#define MAX_GAP_PROD_CONS ( UNITS_BUFFER_SIZE / 4 )
-#define ETH_DEF_LEN 1540 /* 40 bytes used by the card */
-#define ETH_FCS_LEN 14
-#define DEF_MTU ETH_DEF_LEN + ETH_FCS_LEN
-#define DEF_IOBUF_SIZE ETH_DEF_LEN
-
-#define MAC_ADDRESS_SIZE 6
-#define NUM_EQES 16
-#define ROUND_TO_CHECK 0x400
-
-#define DELAY_LINK_CHECK 300
-#define CHECK_LINK_TIMES 7
-
-
-#define XNOR(x,y) (!(x) == !(y))
-#define dma_addr_t unsigned long
-#define PAGE_SIZE 4096
-#define PAGE_MASK (PAGE_SIZE - 1)
-#define MTNIC_MAILBOX_SIZE PAGE_SIZE
-
-
-
-
-/* BITOPS */
-#define MTNIC_BC_OFF(bc) ((bc) >> 8)
-#define MTNIC_BC_SZ(bc) ((bc) & 0xff)
-#define MTNIC_BC_ONES(size) (~((int)0x80000000 >> (31 - size)))
-#define MTNIC_BC_MASK(bc) \
- (MTNIC_BC_ONES(MTNIC_BC_SZ(bc)) << MTNIC_BC_OFF(bc))
-#define MTNIC_BC_VAL(val, bc) \
- (((val) & MTNIC_BC_ONES(MTNIC_BC_SZ(bc))) << MTNIC_BC_OFF(bc))
-/*
- * Sub word fields - bit code base extraction/setting etc
- */
-
-/* Encode two values */
-#define MTNIC_BC(off, size) ((off << 8) | (size & 0xff))
-
-/* Get value of field 'bc' from 'x' */
-#define MTNIC_BC_GET(x, bc) \
- (((x) >> MTNIC_BC_OFF(bc)) & MTNIC_BC_ONES(MTNIC_BC_SZ(bc)))
-
-/* Set value of field 'bc' of 'x' to 'val' */
-#define MTNIC_BC_SET(x, val, bc) \
- ((x) = ((x) & ~MTNIC_BC_MASK(bc)) | MTNIC_BC_VAL(val, bc))
-
-/* Like MTNIC_BC_SET, except the previous value is assumed to be 0 */
-#define MTNIC_BC_PUT(x, val, bc) ((x) |= MTNIC_BC_VAL(val, bc))
-
-
-
-/*
- * Device constants
- */
-typedef enum mtnic_if_cmd {
- /* NIC commands: */
- MTNIC_IF_CMD_QUERY_FW = 0x004, /* query FW (size, version, etc) */
- MTNIC_IF_CMD_MAP_FW = 0xfff, /* map pages for FW image */
- MTNIC_IF_CMD_RUN_FW = 0xff6, /* run the FW */
- MTNIC_IF_CMD_QUERY_CAP = 0x001, /* query MTNIC capabilities */
- MTNIC_IF_CMD_MAP_PAGES = 0x002, /* map physical pages to HW */
- MTNIC_IF_CMD_OPEN_NIC = 0x003, /* run the firmware */
- MTNIC_IF_CMD_CONFIG_RX = 0x005, /* general receive configuration */
- MTNIC_IF_CMD_CONFIG_TX = 0x006, /* general transmit configuration */
- MTNIC_IF_CMD_CONFIG_INT_FREQ = 0x007, /* interrupt timers freq limits */
- MTNIC_IF_CMD_HEART_BEAT = 0x008, /* NOP command testing liveliness */
- MTNIC_IF_CMD_CLOSE_NIC = 0x009, /* release memory and stop the NIC */
-
- /* Port commands: */
- MTNIC_IF_CMD_CONFIG_PORT_RSS_STEER = 0x10, /* set RSS mode */
- MTNIC_IF_CMD_SET_PORT_RSS_INDIRECTION = 0x11, /* set RSS indirection tbl */
- MTNIC_IF_CMD_CONFIG_PORT_PRIO_STEERING = 0x12, /* set PRIORITY mode */
- MTNIC_IF_CMD_CONFIG_PORT_ADDR_STEER = 0x13, /* set Address steer mode */
- MTNIC_IF_CMD_CONFIG_PORT_VLAN_FILTER = 0x14, /* configure VLAN filter */
- MTNIC_IF_CMD_CONFIG_PORT_MCAST_FILTER = 0x15, /* configure mcast filter */
- MTNIC_IF_CMD_ENABLE_PORT_MCAST_FILTER = 0x16, /* enable/disable */
- MTNIC_IF_CMD_SET_PORT_MTU = 0x17, /* set port MTU */
- MTNIC_IF_CMD_SET_PORT_PROMISCUOUS_MODE = 0x18, /* enable/disable promisc */
- MTNIC_IF_CMD_SET_PORT_DEFAULT_RING = 0x19, /* set the default ring */
- MTNIC_IF_CMD_SET_PORT_STATE = 0x1a, /* set link up/down */
- MTNIC_IF_CMD_DUMP_STAT = 0x1b, /* dump statistics */
- MTNIC_IF_CMD_ARM_PORT_STATE_EVENT = 0x1c, /* arm the port state event */
-
- /* Ring / Completion queue commands: */
- MTNIC_IF_CMD_CONFIG_CQ = 0x20, /* set up completion queue */
- MTNIC_IF_CMD_CONFIG_RX_RING = 0x21, /* setup Rx ring */
- MTNIC_IF_CMD_SET_RX_RING_ADDR = 0x22, /* set Rx ring filter by address */
- MTNIC_IF_CMD_SET_RX_RING_MCAST = 0x23, /* set Rx ring mcast filter */
- MTNIC_IF_CMD_ARM_RX_RING_WM = 0x24, /* one-time low-watermark INT */
- MTNIC_IF_CMD_CONFIG_TX_RING = 0x25, /* set up Tx ring */
- MTNIC_IF_CMD_ENFORCE_TX_RING_ADDR = 0x26, /* setup anti spoofing */
- MTNIC_IF_CMD_CONFIG_EQ = 0x27, /* config EQ ring */
- MTNIC_IF_CMD_RELEASE_RESOURCE = 0x28, /* release internal ref to resource */
-}
-mtnic_if_cmd_t;
-
-
-/** selectors for MTNIC_IF_CMD_QUERY_CAP */
-typedef enum mtnic_if_caps {
- MTNIC_IF_CAP_MAX_TX_RING_PER_PORT = 0x0,
- MTNIC_IF_CAP_MAX_RX_RING_PER_PORT = 0x1,
- MTNIC_IF_CAP_MAX_CQ_PER_PORT = 0x2,
- MTNIC_IF_CAP_NUM_PORTS = 0x3,
- MTNIC_IF_CAP_MAX_TX_DESC = 0x4,
- MTNIC_IF_CAP_MAX_RX_DESC = 0x5,
- MTNIC_IF_CAP_MAX_CQES = 0x6,
- MTNIC_IF_CAP_MAX_TX_SG_ENTRIES = 0x7,
- MTNIC_IF_CAP_MAX_RX_SG_ENTRIES = 0x8,
- MTNIC_IF_CAP_MEM_KEY = 0x9, /* key to mem (after map_pages) */
- MTNIC_IF_CAP_RSS_HASH_TYPE = 0xa, /* one of mtnic_if_rss_types_t */
- MTNIC_IF_CAP_MAX_PORT_UCAST_ADDR = 0xc,
- MTNIC_IF_CAP_MAX_RING_UCAST_ADDR = 0xd, /* only for ADDR steer */
- MTNIC_IF_CAP_MAX_PORT_MCAST_ADDR = 0xe,
- MTNIC_IF_CAP_MAX_RING_MCAST_ADDR = 0xf, /* only for ADDR steer */
- MTNIC_IF_CAP_INTA = 0x10,
- MTNIC_IF_CAP_BOARD_ID_LOW = 0x11,
- MTNIC_IF_CAP_BOARD_ID_HIGH = 0x12,
- MTNIC_IF_CAP_TX_CQ_DB_OFFSET = 0x13, /* offset in bytes for TX, CQ doorbell record */
- MTNIC_IF_CAP_EQ_DB_OFFSET = 0x14, /* offset in bytes for EQ doorbell record */
-
- /* These are per port - using port number from cap modifier field */
- MTNIC_IF_CAP_SPEED = 0x20,
- MTNIC_IF_CAP_DEFAULT_MAC = 0x21,
- MTNIC_IF_CAP_EQ_OFFSET = 0x22,
- MTNIC_IF_CAP_CQ_OFFSET = 0x23,
- MTNIC_IF_CAP_TX_OFFSET = 0x24,
- MTNIC_IF_CAP_RX_OFFSET = 0x25,
-
-} mtnic_if_caps_t;
-
-typedef enum mtnic_if_steer_types {
- MTNIC_IF_STEER_NONE = 0,
- MTNIC_IF_STEER_PRIORITY = 1,
- MTNIC_IF_STEER_RSS = 2,
- MTNIC_IF_STEER_ADDRESS = 3,
-} mtnic_if_steer_types_t;
-
-/** types of memory access modes */
-typedef enum mtnic_if_memory_types {
- MTNIC_IF_MEM_TYPE_SNOOP = 1,
- MTNIC_IF_MEM_TYPE_NO_SNOOP = 2
-} mtnic_if_memory_types_t;
-
-
-enum {
- MTNIC_HCR_BASE = 0x1f000,
- MTNIC_HCR_SIZE = 0x0001c,
- MTNIC_CLR_INT_SIZE = 0x00008,
-};
-
-#define MTNIC_RESET_OFFSET 0xF0010
-
-
-
-/********************************************************************
-* Device private data structures
-*
-* This section contains structures of all device private data:
-* descriptors, rings, CQs, EQ ....
-*
-*
-*********************************************************************/
-/*
- * Descriptor format
- */
-struct mtnic_ctrl_seg {
- u32 op_own;
-#define MTNIC_BIT_DESC_OWN 0x80000000
-#define MTNIC_OPCODE_SEND 0xa
- u32 size_vlan;
- u32 flags;
-#define MTNIC_BIT_NO_ICRC 0x2
-#define MTNIC_BIT_TX_COMP 0xc
- u32 reserved;
-};
-
-struct mtnic_data_seg {
- u32 count;
-#define MTNIC_INLINE 0x80000000
- u32 mem_type;
-#define MTNIC_MEMTYPE_PAD 0x100
- u32 addr_h;
- u32 addr_l;
-};
-
-struct mtnic_tx_desc {
- struct mtnic_ctrl_seg ctrl;
- struct mtnic_data_seg data; /* at least one data segment */
-};
-
-struct mtnic_rx_desc {
- u16 reserved1;
- u16 next;
- u32 reserved2[3];
- struct mtnic_data_seg data; /* actual number of entries depends on
- * rx ring stride */
-};
-
-/*
- * Rings
- */
-struct mtnic_rx_db_record {
- u32 count;
-};
-
-struct mtnic_ring {
- u32 size; /* REMOVE ____cacheline_aligned_in_smp; *//* number of Rx descs or TXBBs */
- u32 size_mask;
- u16 stride;
- u16 cq; /* index of port CQ associated with this ring */
- u32 prod;
- u32 cons; /* holds the last consumed index */
-
- /* Buffers */
- u32 buf_size; /* ring buffer size in bytes */
- dma_addr_t dma;
- void *buf;
- struct io_buffer *iobuf[UNITS_BUFFER_SIZE];
-
- /* Tx only */
- struct mtnic_txcq_db *txcq_db;
- u32 db_offset;
-
- /* Rx ring only */
- dma_addr_t iobuf_dma;
- struct mtnic_rx_db_record *db;
- dma_addr_t db_dma;
-};
-
-/*
- * CQ
- */
-
-struct mtnic_cqe {
- u8 vp; /* VLAN present */
- u8 reserved1[3];
- u32 rss_hash;
- u32 reserved2;
- u16 vlan_prio;
- u16 reserved3;
- u8 flags_h;
- u8 flags_l_rht;
- u8 ipv6_mask;
- u8 enc_bf;
-#define MTNIC_BIT_BAD_FCS 0x10
-#define MTNIC_OPCODE_ERROR 0x1e
- u32 byte_cnt;
- u16 index;
- u16 chksum;
- u8 reserved4[3];
- u8 op_tr_own;
-#define MTNIC_BIT_CQ_OWN 0x80
-};
-
-
-struct mtnic_cq_db_record {
- u32 update_ci;
- u32 cmd_ci;
-};
-
-struct mtnic_cq {
- int num; /* CQ number (on attached port) */
- u32 size; /* number of CQEs in CQ */
- u32 last; /* number of CQEs consumed */
- struct mtnic_cq_db_record *db;
- struct net_device *dev;
-
- dma_addr_t db_dma;
- u8 is_rx;
- u16 ring; /* ring associated with this CQ */
- u32 offset_ind;
-
- /* CQE ring */
- u32 buf_size; /* ring size in bytes */
- struct mtnic_cqe *buf;
- dma_addr_t dma;
-};
-
-/*
- * EQ
- */
-
-struct mtnic_eqe {
- u8 reserved1;
- u8 type;
- u8 reserved2;
- u8 subtype;
- u8 reserved3[3];
- u8 ring_cq;
- u32 reserved4;
- u8 port;
-#define MTNIC_MASK_EQE_PORT MTNIC_BC(4,2)
- u8 reserved5[2];
- u8 syndrome;
- u8 reserved6[15];
- u8 own;
-#define MTNIC_BIT_EQE_OWN 0x80
-};
-
-struct mtnic_eq {
- u32 size; /* number of EQEs in ring */
- u32 buf_size; /* EQ size in bytes */
- void *buf;
- dma_addr_t dma;
-};
-
-enum mtnic_state {
- CARD_DOWN,
- CARD_INITIALIZED,
- CARD_UP,
- CARD_LINK_DOWN,
-};
-
-/* FW */
-struct mtnic_pages {
- u32 num;
- u32 *buf;
-};
-struct mtnic_err_buf {
- u64 offset;
- u32 size;
-};
-
-
-
-struct mtnic_cmd {
- void *buf;
- unsigned long mapping;
- u32 tbit;
-};
-
-
-struct mtnic_txcq_db {
- u32 reserved1[5];
- u32 send_db;
- u32 reserved2[2];
- u32 cq_arm;
- u32 cq_ci;
-};
-
-
-
-/*
- * Device private data
- *
- */
-struct mtnic {
- struct net_device *netdev[MTNIC_MAX_PORTS];
- struct mtnic_if_cmd_reg *hcr;
- struct mtnic_cmd cmd;
- struct pci_device *pdev;
-
- struct mtnic_eq eq;
- u32 *eq_db;
-
- /* Firmware and board info */
- u64 fw_ver;
- struct {
- struct mtnic_pages fw_pages;
- struct mtnic_pages extra_pages;
- struct mtnic_err_buf err_buf;
- u16 ifc_rev;
- u8 num_ports;
- u64 mac[MTNIC_MAX_PORTS];
- u16 cq_offset;
- u16 tx_offset[MTNIC_MAX_PORTS];
- u16 rx_offset[MTNIC_MAX_PORTS];
- u32 mem_type_snoop_be;
- u32 txcq_db_offset;
- u32 eq_db_offset;
- } fw;
-};
-
-
-
-
-
-struct mtnic_port {
-
- struct mtnic *mtnic;
- u8 port;
-
- enum mtnic_state state;
-
- /* TX, RX, CQs, EQ */
- struct mtnic_ring tx_ring;
- struct mtnic_ring rx_ring;
- struct mtnic_cq cq[NUM_CQS];
- u32 poll_counter;
- struct net_device *netdev;
-
-
-};
-
-
-
-
-
-
-
-
-
-
-
-
-/***************************************************************************
- * NIC COMMANDS
- *
- * The section below provides struct definition for commands parameters,
- * and arguments values enumeration.
- *
- * The format used for the struct names is:
- * mtnic_if_<cmd name>_<in|out>_<imm|mbox>
- *
- ***************************************************************************/
-/**
- * Command Register (Command interface)
- */
-struct mtnic_if_cmd_reg {
- unsigned long in_param_h;
- u32 in_param_l;
- u32 input_modifier;
- u32 out_param_h;
- u32 out_param_l;
- u32 token;
-#define MTNIC_MASK_CMD_REG_TOKEN MTNIC_BC(16,32)
- u32 status_go_opcode;
-#define MTNIC_MASK_CMD_REG_OPCODE MTNIC_BC(0,16)
-#define MTNIC_MASK_CMD_REG_T_BIT MTNIC_BC(21,1)
-#define MTNIC_MASK_CMD_REG_GO_BIT MTNIC_BC(23,1)
-#define MTNIC_MASK_CMD_REG_STATUS MTNIC_BC(24,8)
-};
-
-
-
-/* CMD QUERY_FW */
-struct mtnic_if_query_fw_out_mbox {
- u16 fw_pages; /* Total number of memory pages the device requires */
- u16 rev_maj;
- u16 rev_smin;
- u16 rev_min;
- u16 reserved1;
- u16 ifc_rev; /* major revision of the command interface */
- u8 ft;
- u8 reserved2[3];
- u32 reserved3[4];
- u64 clr_int_base;
- u32 reserved4[2];
- u64 err_buf_start;
- u32 err_buf_size;
-};
-
-/* CMD MTNIC_IF_CMD_QUERY_CAP */
-struct mtnic_if_query_cap_in_imm {
- u16 reserved1;
- u8 cap_modifier; /* a modifier for the particular capability */
- u8 cap_index; /* the index of the capability queried */
- u32 reserved2;
-};
-
-/* CMD OPEN_NIC */
-struct mtnic_if_open_nic_in_mbox {
- u16 reserved1;
- u16 mkey; /* number of mem keys for all chip*/
- u32 mkey_entry; /* mem key entries for each key*/
- u8 log_rx_p1; /* log2 rx rings for port1 */
- u8 log_cq_p1; /* log2 cq for port1 */
- u8 log_tx_p1; /* log2 tx rings for port1 */
- u8 steer_p1; /* port 1 steering mode */
- u16 reserved2;
- u8 log_vlan_p1; /* log2 vlan per rx port1 */
- u8 log_mac_p1; /* log2 mac per rx port1 */
-
- u8 log_rx_p2; /* log2 rx rings for port1 */
- u8 log_cq_p2; /* log2 cq for port1 */
- u8 log_tx_p2; /* log2 tx rings for port1 */
- u8 steer_p2; /* port 1 steering mode */
- u16 reserved3;
- u8 log_vlan_p2; /* log2 vlan per rx port1 */
- u8 log_mac_p2; /* log2 mac per rx port1 */
-};
-
-
-/* CMD CONFIG_RX */
-struct mtnic_if_config_rx_in_imm {
- u16 spkt_size; /* size of small packets interrupts enabled on CQ */
- u16 resp_rcv_pause_frm_mcast_vlan_comp; /* Two flags see MASK below */
- /* Enable response to receive pause frames */
- /* Use VLAN in exact-match multicast checks (see SET_RX_RING_MCAST) */
-};
-
-/* CMD CONFIG_TX */
-struct mtnic_if_config_send_in_imm {
- u32 enph_gpf; /* Enable PseudoHeader and GeneratePauseFrames flags */
- u32 reserved;
-};
-
-/* CMD HEART_BEAT */
-struct mtnic_if_heart_beat_out_imm {
- u32 flags; /* several flags */
-#define MTNIC_MASK_HEAR_BEAT_INT_ERROR MTNIC_BC(31,1)
- u32 reserved;
-};
-
-
-/*
- * PORT COMMANDS
- */
-/* CMD CONFIG_PORT_VLAN_FILTER */
-/* in mbox is a 4K bits mask - bit per VLAN */
-struct mtnic_if_config_port_vlan_filter_in_mbox {
- u64 filter[64]; /* vlans[63:0] sit in filter[0], vlans[127:64] sit in filter[1] .. */
-};
-
-
-/* CMD SET_PORT_MTU */
-struct mtnic_if_set_port_mtu_in_imm {
- u16 reserved1;
- u16 mtu; /* The MTU of the port in bytes */
- u32 reserved2;
-};
-
-/* CMD SET_PORT_DEFAULT_RING */
-struct mtnic_if_set_port_default_ring_in_imm {
- u8 reserved1[3];
- u8 ring; /* Index of ring that collects promiscuous traffic */
- u32 reserved2;
-};
-
-/* CMD SET_PORT_STATE */
-struct mtnic_if_set_port_state_in_imm {
- u32 state; /* if 1 the port state should be up */
-#define MTNIC_MASK_CONFIG_PORT_STATE MTNIC_BC(0,1)
- u32 reserved;
-};
-
-/* CMD CONFIG_CQ */
-struct mtnic_if_config_cq_in_mbox {
- u8 reserved1;
- u8 cq;
- u8 size; /* Num CQs is 2^size (size <= 22) */
- u8 offset; /* start address of CQE in first page (11:6) */
- u16 tlast; /* interrupt moderation timer from last completion usec */
- u8 flags; /* flags */
- u8 int_vector; /* MSI index if MSI is enabled, otherwise reserved */
- u16 reserved2;
- u16 max_cnt; /* interrupt moderation counter */
- u8 page_size; /* each mapped page is 2^(12+page_size) bytes */
- u8 reserved4[3];
- u32 db_record_addr_h; /*physical address of CQ doorbell record */
- u32 db_record_addr_l; /*physical address of CQ doorbell record */
- u32 page_address[0]; /* 64 bit page addresses of CQ buffer */
-};
-
-/* CMD CONFIG_RX_RING */
-struct mtnic_if_config_rx_ring_in_mbox {
- u8 reserved1;
- u8 ring; /* The ring index (with offset) */
- u8 stride_size; /* stride and size */
- /* Entry size = 16* (2^stride) bytes */
-#define MTNIC_MASK_CONFIG_RX_RING_STRIDE MTNIC_BC(4,3)
- /* Rx ring size is 2^size entries */
-#define MTNIC_MASK_CONFIG_RX_RING_SIZE MTNIC_BC(0,4)
- u8 flags; /* Bit0 - header separation */
- u8 page_size; /* Each mapped page is 2^(12+page_size) bytes */
- u8 reserved2[2];
- u8 cq; /* CQ associated with this ring */
- u32 db_record_addr_h;
- u32 db_record_addr_l;
- u32 page_address[0];/* Array of 2^size 64b page descriptor addresses */
- /* Must hold all Rx descriptors + doorbell record. */
-};
-
-/* The modifier for SET_RX_RING_ADDR */
-struct mtnic_if_set_rx_ring_modifier {
- u8 reserved;
- u8 port_num;
- u8 index;
- u8 ring;
-};
-
-/* CMD SET_RX_RING_ADDR */
-struct mtnic_if_set_rx_ring_addr_in_imm {
- u16 mac_47_32; /* UCAST MAC Address bits 47:32 */
- u16 flags_vlan_id; /* MAC/VLAN flags and vlan id */
-#define MTNIC_MASK_SET_RX_RING_ADDR_VLAN_ID MTNIC_BC(0,12)
-#define MTNIC_MASK_SET_RX_RING_ADDR_BY_MAC MTNIC_BC(12,1)
-#define MTNIC_MASK_SET_RX_RING_ADDR_BY_VLAN MTNIC_BC(13,1)
- u32 mac_31_0; /* UCAST MAC Address bits 31:0 */
-};
-
-/* CMD CONFIG_TX_RING */
-struct mtnic_if_config_send_ring_in_mbox {
- u16 ring; /* The ring index (with offset) */
-#define MTNIC_MASK_CONFIG_TX_RING_INDEX MTNIC_BC(0,8)
- u8 size; /* Tx ring size is 32*2^size bytes */
-#define MTNIC_MASK_CONFIG_TX_RING_SIZE MTNIC_BC(0,4)
- u8 reserved;
- u8 page_size; /* Each mapped page is 2^(12+page_size) bytes */
- u8 qos_class; /* The COS used for this Tx */
- u16 cq; /* CQ associated with this ring */
-#define MTNIC_MASK_CONFIG_TX_CQ_INDEX MTNIC_BC(0,8)
- u32 page_address[0]; /* 64 bit page addresses of descriptor buffer. */
- /* The buffer must accommodate all Tx descriptors */
-};
-
-/* CMD CONFIG_EQ */
-struct mtnic_if_config_eq_in_mbox {
- u8 reserved1;
- u8 int_vector; /* MSI index if MSI enabled; otherwise reserved */
-#define MTNIC_MASK_CONFIG_EQ_INT_VEC MTNIC_BC(0,6)
- u8 size; /* Num CQs is 2^size entries (size <= 22) */
-#define MTNIC_MASK_CONFIG_EQ_SIZE MTNIC_BC(0,5)
- u8 offset; /* Start address of CQE in first page (11:6) */
-#define MTNIC_MASK_CONFIG_EQ_OFFSET MTNIC_BC(0,6)
- u8 page_size; /* Each mapped page is 2^(12+page_size) bytes*/
- u8 reserved[3];
- u32 page_address[0]; /* 64 bit page addresses of EQ buffer */
-};
-
-/* CMD RELEASE_RESOURCE */
-enum mtnic_if_resource_types {
- MTNIC_IF_RESOURCE_TYPE_CQ = 0,
- MTNIC_IF_RESOURCE_TYPE_RX_RING,
- MTNIC_IF_RESOURCE_TYPE_TX_RING,
- MTNIC_IF_RESOURCE_TYPE_EQ
-};
-
-struct mtnic_if_release_resource_in_imm {
- u8 reserved1;
- u8 index; /* must be 0 for TYPE_EQ */
- u8 reserved2;
- u8 type; /* see enum mtnic_if_resource_types */
- u32 reserved3;
-};
-
-
-
-
-
-
-
-
-
-/*******************************************************************
-*
-* PCI addon structures
-*
-********************************************************************/
-
-struct pcidev {
- unsigned long bar[6];
- u32 dev_config_space[64];
- struct pci_device *dev;
- u8 bus;
- u8 devfn;
-};
-
-struct dev_pci_struct {
- struct pcidev dev;
- struct pcidev br;
-};
-
-/* The only global var */
-struct dev_pci_struct mtnic_pci_dev;
-
-
-
-#endif /* H_MTNIC_IF_DEFS_H */
-