/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static DEFINE_PCI_DEVICE_TABLE(cxgb3_pci_tbl) = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	CH_DEVICE(0x35, 6),	/* T3C20-derived T3C10 */
	CH_DEVICE(0x36, 3),	/* S320E-CR */
	CH_DEVICE(0x37, 7),	/* N320E-G2 */
	{0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "interrupt scheme: 0 = pin interrupts only, 1 = allow MSI, 2 = allow MSI-X");

/*
 * The driver enables offload by default.  To disable it, set
 * ofld_disable = 1.
 */

static int ofld_disable;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "set to 1 to disable offload at init time");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
struct workqueue_struct *cxgb3_wq;

/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the net device whose link is to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}

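/*
 * Temporarily set F_ENDROPPKT so the MAC drops packets rather than backing
 * up, then bounce the RX/TX enables to flush out whatever is stuck in the
 * TX FIFO while the link is faulted.  disable_tx_fifo_drain() below undoes
 * this once the fault clears.
 */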
static void enable_tx_fifo_drain(struct adapter *adapter,
				 struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
			 F_ENDROPPKT);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
	t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
}

static void disable_tx_fifo_drain(struct adapter *adapter,
				  struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
			 F_ENDROPPKT, 0);
}

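/**
 *	t3_os_link_fault - handle a link fault
 *	@adap: the adapter associated with the fault
 *	@port_id: the port index on which the fault occurred
 *	@state: non-zero if the link is back up, zero if it is down
 *
 *	This is the OS-dependent handler for link faults.  It updates the
 *	carrier state and re-enables the MAC or drains the TX FIFO as the
 *	link leaves or enters the fault condition.
 */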
void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (state == netif_carrier_ok(dev))
		return;

	if (state) {
		struct cmac *mac = &pi->mac;

		netif_carrier_on(dev);

		disable_tx_fifo_drain(adap, pi);

		/* Clear local faults */
		t3_xgm_intr_disable(adap, pi->port_id);
		t3_read_reg(adap, A_XGM_INT_STATUS +
				    pi->mac.offset);
		t3_write_reg(adap,
			     A_XGM_INT_CAUSE + pi->mac.offset,
			     F_XGM_INT);

		t3_set_reg_field(adap,
				 A_XGM_INT_ENABLE +
				 pi->mac.offset,
				 F_XGM_INT, F_XGM_INT);
		t3_xgm_intr_enable(adap, pi->port_id);

		t3_mac_enable(mac, MAC_DIRECTION_TX);
	} else {
		netif_carrier_off(dev);

		/* Flush TX FIFO */
		enable_tx_fifo_drain(adap, pi);
	}
	link_report(dev);
}

/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			disable_tx_fifo_drain(adapter, pi);

			t3_mac_enable(mac, MAC_DIRECTION_RX);

			/* Clear local faults */
			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS +
				    pi->mac.offset);
			t3_write_reg(adapter,
				     A_XGM_INT_CAUSE + pi->mac.offset,
				     F_XGM_INT);

			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, F_XGM_INT);
			t3_xgm_intr_enable(adapter, pi->port_id);

			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);

			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, 0);

			if (is_10G(adapter))
				pi->phy.ops->power_down(&pi->phy, 1);

			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);

			/* Flush TX FIFO */
			enable_tx_fifo_drain(adapter, pi);
		}

		link_report(dev);
	}
}

/**
 *	t3_os_phymod_changed - handle PHY module changes
 *	@adap: the adapter whose PHY module changed
 *	@port_id: the port index of the changed PHY module
 *
 *	This is the OS-dependent handler for PHY module changes.  It is
 *	invoked when a PHY module is removed or inserted for any OS-specific
 *	processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->phy.modtype == phy_modtype_none)
		printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
	else
		printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
		       mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	t3_mac_set_rx_mode(&pi->mac, dev);
}

/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	t3_mac_reset(mac);
	t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
	t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
	t3_mac_set_rx_mode(mac, dev);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s-%d", d->name, pi->first_qset + i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}

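/*
 * Request one MSI-X interrupt per SGE queue set, unwinding any vectors
 * already requested if one of the requests fails.
 */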
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}

static void free_irq_resources(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);
}

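/*
 * Poll in 10ms steps (up to 5 attempts) until the first response queue has
 * absorbed n management replies beyond init_cnt, or give up with -ETIMEDOUT.
 */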
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
			      unsigned long n)
{
	int attempts = 5;

	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
		if (!--attempts)
			return -ETIMEDOUT;
		msleep(10);
	}
	return 0;
}

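/*
 * Write every SMT, L2T, and routing-table entry, plus one TCB field, so
 * that all of TP's parity-protected memories start out with valid parity.
 */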
static int init_tp_parity(struct adapter *adap)
{
	int i;
	struct sk_buff *skb;
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->mtu_idx = NMTUS - 1;
		req->iff = i;
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		goto alloc_skb_fail;

	greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
	memset(greq, 0, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	if (skb == adap->nofail_skb)
		adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);

	t3_tp_set_offload_mode(adap, 0);
	return i;

alloc_skb_fail:
	t3_tp_set_offload_mode(adap, 0);
	return -ENOMEM;
}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}

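/*
 * Ring the SGE kernel doorbell for every TX queue of each active queue set,
 * prompting the hardware to re-examine queues whose doorbells may have been
 * missed or dropped.
 */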
static void ring_dbs(struct adapter *adap)
{
	int i, j;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			for (j = 0; j < SGE_TXQ_PER_SET; j++)
				t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
	}
}

static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}

	/*
	 * netif_napi_add() can be called only once per napi_struct because it
	 * adds each new napi_struct to a list.  Be careful not to call it a
	 * second time, e.g., during EEH recovery, by making a note of it.
	 */
	adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
	int i;
	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}

/**
 *	set_qset_lro - Turn a queue set's LRO capability on and off
 *	@dev: the device the qset is attached to
 *	@qset_idx: the queue set index
 *	@val: the LRO switch
 *
 *	Sets LRO on or off for a particular queue set by updating both the
 *	saved queue-set parameters and the live SGE queue state.
 */
static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->params.sge.qset[qset_idx].lro = !!val;
	adapter->sge.qs[qset_idx].lro_enabled = !!val;
}

/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev,
				netdev_get_tx_queue(dev, j));
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}

static ssize_t attr_show(struct device *d, char *buf,
			 ssize_t(*format) (struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format) (to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}

static ssize_t attr_store(struct device *d,
			  const char *buf, size_t len,
			  ssize_t(*set) (struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set) (to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };

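/*
 * Read a TX scheduler's parameters (bytes and clocks per tick) through the
 * TM PIO window and report the resulting rate in Kbps.
 */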
static ssize_t tm_attr_show(struct device *d,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}

static ssize_t tm_attr_store(struct device *d,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	char *endp;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}

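/*
 * Program the SMT entry for a port with both its LAN MAC address and its
 * iSCSI MAC address via a CPL_SMT_WRITE_REQ work request.
 */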
static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct port_info *pi = netdev_priv(adapter->port[idx]);
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}

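/* Program an SMT entry for each port of the adapter. */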
static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
	    write_smt_entry(adapter, i);
	return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

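/*
 * Issue a management work request that binds TX queue qidx to scheduler
 * sched with rate parameters lo/hi and port binding port.
 */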
static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			      int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		return -ENOMEM;

	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	ret = t3_mgmt_tx(adap, skb);
	if (skb == adap->nofail_skb) {
		adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
					     GFP_KERNEL);
		if (!adap->nofail_skb)
			ret = -ENOMEM;
	}

	return ret;
}

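/*
 * Bind each port's TX queue sets to the port through the firmware packet
 * scheduler so egress traffic leaves on the correct interface.
 */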
static int bind_qsets(struct adapter *adap)
{
	int i, j, err = 0;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j) {
			int ret = send_pktsched_cmd(adap, 1,
						    pi->first_qset + j, -1,
						    -1, i);
			if (ret)
				err = ret;
		}
	}

	return err;
}

#define FW_VERSION __stringify(FW_VERSION_MAJOR) "."			\
	__stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "."		\
	__stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
MODULE_FIRMWARE(FW_FNAME);
MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);

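/* Map an EDC table index to its firmware file name, NULL if unknown. */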
static inline const char *get_edc_fw_name(int edc_idx)
{
	const char *fw_name = NULL;

	switch (edc_idx) {
	case EDC_OPT_AEL2005:
		fw_name = AEL2005_OPT_EDC_NAME;
		break;
	case EDC_TWX_AEL2005:
		fw_name = AEL2005_TWX_EDC_NAME;
		break;
	case EDC_TWX_AEL2020:
		fw_name = AEL2020_TWX_EDC_NAME;
		break;
	}
	return fw_name;
}

int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
{
	struct adapter *adapter = phy->adapter;
	const struct firmware *fw;
	char buf[64];
	u32 csum;
	const __be32 *p;
	u16 *cache = phy->phy_cache;
	int i, ret;

	snprintf(buf, sizeof(buf), "%s", get_edc_fw_name(edc_idx));

	ret = request_firmware(&fw, buf, &adapter->pdev->dev);
	if (ret < 0) {
		dev_err(&adapter->pdev->dev,
			"could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}

	/* check size, take checksum in account */
	if (fw->size > size + 4) {
		CH_ERR(adapter, "firmware image too large %u, expected %d\n",
		       (unsigned int)fw->size, size + 4);
		ret = -EINVAL;
	}

	/* compute checksum */
	p = (const __be32 *)fw->data;
	for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		ret = -EINVAL;
	}

	/* program the PHY cache only if the image passed validation */
	if (!ret) {
		for (i = 0; i < size / 4 ; i++) {
			*cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
			*cache++ = be32_to_cpu(p[i]) & 0xffff;
		}
	}

	release_firmware(fw);

	return ret;
}

static int upgrade_fw(struct adapter *adap)
{
	int ret;
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	ret = request_firmware(&fw, FW_FNAME, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			FW_FNAME);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

	if (ret == 0)
		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
	else
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

	return ret;
}

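/*
 * Map the chip revision to the character used in TP SRAM image file names
 * ('b' or 'c'); returns 0 for revisions that take no loadable image.
 */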
static inline char t3rev2char(struct adapter *adapter)
{
	char rev = 0;

	switch(adapter->params.rev) {
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	case T3_REV_C:
		rev = 'c';
		break;
	}
	return rev;
}

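/*
 * Fetch the protocol-engine (TP SRAM) image matching this chip revision
 * from userspace and program it into the adapter.
 */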
static int update_tpsram(struct adapter *adap)
{
	const struct firmware *tpsram;
	char buf[64];
	struct device *dev = &adap->pdev->dev;
	int ret;
	char rev;

	rev = t3rev2char(adap);
	if (!rev)
		return 0;

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);

	ret = request_firmware(&tpsram, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
			buf);
		return ret;
	}

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret == 0)
		dev_info(dev,
			 "successful update of protocol engine "
			 "to %d.%d.%d\n",
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	else
		dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

release_tpsram:
	release_firmware(tpsram);

	return ret;
}

/**
 *	cxgb_up - enable the adapter
 *	@adapter: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap);
		if (err == -EINVAL) {
			err = upgrade_fw(adap);
			CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
				FW_VERSION_MAJOR, FW_VERSION_MINOR,
				FW_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		err = t3_check_tpsram_version(adap);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
				TP_VERSION_MAJOR, TP_VERSION_MINOR,
				TP_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		/*
		 * Clear interrupts now to catch errors if t3_init_hw fails.
		 * We clear them again later as initialization may trigger
		 * conditions that can interrupt.
		 */
		t3_intr_clear(adap);

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		if (!(adap->flags & NAPI_INIT))
			init_napi(adap);

		t3_start_sge_timers(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_data_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				       0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
	    is_offload(adap) && init_tp_parity(adap) == 0)
		adap->flags |= TP_PARITY_INIT;

	if (adap->flags & TP_PARITY_INIT) {
		t3_write_reg(adap, A_TP_INT_CAUSE,
			     F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	if (!(adap->flags & QUEUES_BOUND)) {
		err = bind_qsets(adap);
		if (err) {
			CH_ERR(adap, "failed to bind qsets, err %d\n", err);
			t3_intr_disable(adap);
			free_irq_resources(adap);
			goto out;
		}
		adap->flags |= QUEUES_BOUND;
	}

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	free_irq_resources(adapter);
	quiesce_rx(adapter);
	t3_sge_stop(adapter);
	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
}

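/*
 * Schedule the periodic adapter check task, using the link polling period
 * if one is configured and the statistics update period otherwise.
 */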
static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
	    (HZ * adap->params.linkpoll_period) / 10 :
	    adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

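/*
 * Bring up offload the first time an offload device is used: enable offload
 * mode, activate the offload module, program port MTUs and the SMT, and
 * notify all registered ULP clients of the new device.
 */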
static int offload_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		goto out;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
		dev_dbg(&dev->dev, "cannot create sysfs group\n");

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);