 1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36
37#include <linux/bitmap.h>
38#include <linux/crc32.h>
39#include <linux/ctype.h>
40#include <linux/debugfs.h>
41#include <linux/err.h>
42#include <linux/etherdevice.h>
43#include <linux/firmware.h>
44#include <linux/if_vlan.h>
45#include <linux/init.h>
46#include <linux/log2.h>
47#include <linux/mdio.h>
48#include <linux/module.h>
49#include <linux/moduleparam.h>
50#include <linux/mutex.h>
51#include <linux/netdevice.h>
52#include <linux/pci.h>
53#include <linux/aer.h>
54#include <linux/rtnetlink.h>
55#include <linux/sched.h>
56#include <linux/seq_file.h>
57#include <linux/sockios.h>
58#include <linux/vmalloc.h>
59#include <linux/workqueue.h>
60#include <net/neighbour.h>
61#include <net/netevent.h>
62#include <asm/uaccess.h>
63
64#include "cxgb4.h"
65#include "t4_regs.h"
66#include "t4_msg.h"
67#include "t4fw_api.h"
68#include "l2t.h"
69
70#define DRV_VERSION "1.0.0-ko"
71#define DRV_DESC "Chelsio T4 Network Driver"
72
73/*
74 * Max interrupt hold-off timer value in us. Queues fall back to this value
75 * under extreme memory pressure so it's largish to give the system time to
76 * recover.
77 */
78#define MAX_SGE_TIMERVAL 200U
79
80enum {
81 MEMWIN0_APERTURE = 65536,
82 MEMWIN0_BASE = 0x30000,
83 MEMWIN1_APERTURE = 32768,
84 MEMWIN1_BASE = 0x28000,
85 MEMWIN2_APERTURE = 2048,
86 MEMWIN2_BASE = 0x1b800,
87};
88
89enum {
90 MAX_TXQ_ENTRIES = 16384,
91 MAX_CTRL_TXQ_ENTRIES = 1024,
92 MAX_RSPQ_ENTRIES = 16384,
93 MAX_RX_BUFFERS = 16384,
94 MIN_TXQ_ENTRIES = 32,
95 MIN_CTRL_TXQ_ENTRIES = 32,
96 MIN_RSPQ_ENTRIES = 128,
97 MIN_FL_ENTRIES = 16
98};
99
100#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
101 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
102 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
103
104#define CH_DEVICE(devid) { PCI_VDEVICE(CHELSIO, devid), 0 }
105
106static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
107 CH_DEVICE(0xa000), /* PE10K */
108 { 0, }
109};
110
111#define FW_FNAME "cxgb4/t4fw.bin"
112
113MODULE_DESCRIPTION(DRV_DESC);
114MODULE_AUTHOR("Chelsio Communications");
115MODULE_LICENSE("Dual BSD/GPL");
116MODULE_VERSION(DRV_VERSION);
117MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
118MODULE_FIRMWARE(FW_FNAME);
119
120static int dflt_msg_enable = DFLT_MSG_ENABLE;
121
122module_param(dflt_msg_enable, int, 0644);
123MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
124
125/*
126 * The driver uses the best interrupt scheme available on a platform in the
127 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
128 * of these schemes the driver may consider as follows:
129 *
130 * msi = 2: choose from among all three options
131 * msi = 1: only consider MSI and INTx interrupts
132 * msi = 0: force INTx interrupts
133 */
134static int msi = 2;
135
136module_param(msi, int, 0644);
137MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
138
139/*
140 * Queue interrupt hold-off timer values. Queues default to the first of these
141 * upon creation.
142 */
143static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
144
145module_param_array(intr_holdoff, uint, NULL, 0644);
146MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
147 "0..4 in microseconds");
148
149static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
150
151module_param_array(intr_cnt, uint, NULL, 0644);
152MODULE_PARM_DESC(intr_cnt,
153 "thresholds 1..3 for queue interrupt packet counters");
154
155static int vf_acls;
156
157#ifdef CONFIG_PCI_IOV
158module_param(vf_acls, bool, 0644);
159MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
160
161static unsigned int num_vf[4];
162
163module_param_array(num_vf, uint, NULL, 0644);
164MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
165#endif
166
167static struct dentry *cxgb4_debugfs_root;
168
169static LIST_HEAD(adapter_list);
170static DEFINE_MUTEX(uld_mutex);
171static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
172static const char *uld_str[] = { "RDMA", "iSCSI" };
173
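/*
 * Log a port's link state: either "link down" or the negotiated speed,
 * duplex, and pause settings.
 */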
174static void link_report(struct net_device *dev)
175{
176 if (!netif_carrier_ok(dev))
177 netdev_info(dev, "link down\n");
178 else {
179 static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
180
181 const char *s = "10Mbps";
182 const struct port_info *p = netdev_priv(dev);
183
184 switch (p->link_cfg.speed) {
185 case SPEED_10000:
186 s = "10Gbps";
187 break;
188 case SPEED_1000:
189 s = "1000Mbps";
190 break;
191 case SPEED_100:
192 s = "100Mbps";
193 break;
194 }
195
196 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
197 fc[p->link_cfg.fc]);
198 }
199}
200
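/*
 * OS callback invoked when a port's link goes up or down. Updates the
 * netdev carrier state and logs the change; disabled ports are skipped.
 */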
201void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
202{
203 struct net_device *dev = adapter->port[port_id];
204
205 /* Skip changes from disabled ports. */
206 if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
207 if (link_stat)
208 netif_carrier_on(dev);
209 else
210 netif_carrier_off(dev);
211
212 link_report(dev);
213 }
214}
215
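/*
 * OS callback invoked when a port's transceiver module changes. Logs the
 * new module type or that the module was unplugged.
 */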
216void t4_os_portmod_changed(const struct adapter *adap, int port_id)
217{
218 static const char *mod_str[] = {
219 NULL, "LR", "SR", "ER", "passive DA", "active DA"
220 };
221
222 const struct net_device *dev = adap->port[port_id];
223 const struct port_info *pi = netdev_priv(dev);
224
225 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
226 netdev_info(dev, "port module unplugged\n");
227 else
228 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
229}
230
231/*
232 * Configure the exact and hash address filters to handle a port's multicast
233 * and secondary unicast MAC addresses.
234 */
235static int set_addr_filters(const struct net_device *dev, bool sleep)
236{
237 u64 mhash = 0;
238 u64 uhash = 0;
239 bool free = true;
240 u16 filt_idx[7];
241 const u8 *addr[7];
242 int ret, naddr = 0;
 243 const struct netdev_hw_addr *ha;
244 int uc_cnt = netdev_uc_count(dev);
 245 int mc_cnt = netdev_mc_count(dev);
 246 const struct port_info *pi = netdev_priv(dev);
247
248 /* first do the secondary unicast addresses */
249 netdev_for_each_uc_addr(ha, dev) {
250 addr[naddr++] = ha->addr;
251 if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
252 ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
253 naddr, addr, filt_idx, &uhash, sleep);
254 if (ret < 0)
255 return ret;
256
257 free = false;
258 naddr = 0;
259 }
260 }
261
262 /* next set up the multicast addresses */
 263 netdev_for_each_mc_addr(ha, dev) {
264 addr[naddr++] = ha->addr;
265 if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
 266 ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
267 naddr, addr, filt_idx, &mhash, sleep);
268 if (ret < 0)
269 return ret;
270
271 free = false;
272 naddr = 0;
273 }
274 }
275
276 return t4_set_addr_hash(pi->adapter, 0, pi->viid, uhash != 0,
277 uhash | mhash, sleep);
278}
279
280/*
 281 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
282 * If @mtu is -1 it is left unchanged.
283 */
284static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
285{
286 int ret;
287 struct port_info *pi = netdev_priv(dev);
288
289 ret = set_addr_filters(dev, sleep_ok);
290 if (ret == 0)
291 ret = t4_set_rxmode(pi->adapter, 0, pi->viid, mtu,
292 (dev->flags & IFF_PROMISC) ? 1 : 0,
 293 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
 294 sleep_ok);
295 return ret;
296}
297
298/**
299 * link_start - enable a port
300 * @dev: the port to enable
301 *
302 * Performs the MAC and PHY actions needed to enable a port.
303 */
304static int link_start(struct net_device *dev)
305{
306 int ret;
307 struct port_info *pi = netdev_priv(dev);
308
309 /*
310 * We do not set address filters and promiscuity here, the stack does
311 * that step explicitly.
312 */
313 ret = t4_set_rxmode(pi->adapter, 0, pi->viid, dev->mtu, -1, -1, -1,
 314 pi->vlan_grp != NULL, true);
 315 if (ret == 0) {
316 ret = t4_change_mac(pi->adapter, 0, pi->viid,
317 pi->xact_addr_filt, dev->dev_addr, true,
 318 true);
 319 if (ret >= 0) {
320 pi->xact_addr_filt = ret;
321 ret = 0;
322 }
323 }
324 if (ret == 0)
325 ret = t4_link_start(pi->adapter, 0, pi->tx_chan, &pi->link_cfg);
326 if (ret == 0)
327 ret = t4_enable_vi(pi->adapter, 0, pi->viid, true, true);
328 return ret;
329}
330
331/*
332 * Response queue handler for the FW event queue.
333 */
334static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
335 const struct pkt_gl *gl)
336{
337 u8 opcode = ((const struct rss_header *)rsp)->opcode;
338
339 rsp++; /* skip RSS header */
340 if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
341 const struct cpl_sge_egr_update *p = (void *)rsp;
342 unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
343 struct sge_txq *txq = q->adap->sge.egr_map[qid];
344
345 txq->restarts++;
346 if ((u8 *)txq < (u8 *)q->adap->sge.ethrxq) {
347 struct sge_eth_txq *eq;
348
349 eq = container_of(txq, struct sge_eth_txq, q);
350 netif_tx_wake_queue(eq->txq);
351 } else {
352 struct sge_ofld_txq *oq;
353
354 oq = container_of(txq, struct sge_ofld_txq, q);
355 tasklet_schedule(&oq->qresume_tsk);
356 }
357 } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
358 const struct cpl_fw6_msg *p = (void *)rsp;
359
360 if (p->type == 0)
361 t4_handle_fw_rpl(q->adap, p->data);
362 } else if (opcode == CPL_L2T_WRITE_RPL) {
363 const struct cpl_l2t_write_rpl *p = (void *)rsp;
364
365 do_l2t_write_rpl(q->adap, p);
366 } else
367 dev_err(q->adap->pdev_dev,
368 "unexpected CPL %#x on FW event queue\n", opcode);
369 return 0;
370}
371
372/**
373 * uldrx_handler - response queue handler for ULD queues
374 * @q: the response queue that received the packet
375 * @rsp: the response queue descriptor holding the offload message
376 * @gl: the gather list of packet fragments
377 *
378 * Deliver an ingress offload packet to a ULD. All processing is done by
379 * the ULD, we just maintain statistics.
380 */
381static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
382 const struct pkt_gl *gl)
383{
384 struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
385
386 if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
387 rxq->stats.nomem++;
388 return -1;
389 }
390 if (gl == NULL)
391 rxq->stats.imm++;
392 else if (gl == CXGB4_MSG_AN)
393 rxq->stats.an++;
394 else
395 rxq->stats.pkts++;
396 return 0;
397}
398
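/*
 * Release the adapter's MSI-X or MSI vectors, whichever are in use.
 */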
399static void disable_msi(struct adapter *adapter)
400{
401 if (adapter->flags & USING_MSIX) {
402 pci_disable_msix(adapter->pdev);
403 adapter->flags &= ~USING_MSIX;
404 } else if (adapter->flags & USING_MSI) {
405 pci_disable_msi(adapter->pdev);
406 adapter->flags &= ~USING_MSI;
407 }
408}
409
410/*
411 * Interrupt handler for non-data events used with MSI-X.
412 */
413static irqreturn_t t4_nondata_intr(int irq, void *cookie)
414{
415 struct adapter *adap = cookie;
416
417 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
418 if (v & PFSW) {
419 adap->swintr = 1;
420 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
421 }
422 t4_slow_intr_handler(adap);
423 return IRQ_HANDLED;
424}
425
426/*
427 * Name the MSI-X interrupts.
428 */
429static void name_msix_vecs(struct adapter *adap)
430{
431 int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc) - 1;
432
433 /* non-data interrupts */
434 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
435 adap->msix_info[0].desc[n] = 0;
436
437 /* FW events */
438 snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", adap->name);
439 adap->msix_info[1].desc[n] = 0;
440
441 /* Ethernet queues */
442 for_each_port(adap, j) {
443 struct net_device *d = adap->port[j];
444 const struct port_info *pi = netdev_priv(d);
445
446 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
447 snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
448 d->name, i);
449 adap->msix_info[msi_idx].desc[n] = 0;
450 }
451 }
452
453 /* offload queues */
454 for_each_ofldrxq(&adap->sge, i) {
455 snprintf(adap->msix_info[msi_idx].desc, n, "%s-ofld%d",
456 adap->name, i);
457 adap->msix_info[msi_idx++].desc[n] = 0;
458 }
459 for_each_rdmarxq(&adap->sge, i) {
460 snprintf(adap->msix_info[msi_idx].desc, n, "%s-rdma%d",
461 adap->name, i);
462 adap->msix_info[msi_idx++].desc[n] = 0;
463 }
464}
465
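/*
 * Request the MSI-X vectors for the FW event queue and all Ethernet, offload,
 * and RDMA Rx queues, releasing any already-acquired vectors on failure.
 */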
466static int request_msix_queue_irqs(struct adapter *adap)
467{
468 struct sge *s = &adap->sge;
469 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2;
470
471 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
472 adap->msix_info[1].desc, &s->fw_evtq);
473 if (err)
474 return err;
475
476 for_each_ethrxq(s, ethqidx) {
477 err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
478 adap->msix_info[msi].desc,
479 &s->ethrxq[ethqidx].rspq);
480 if (err)
481 goto unwind;
482 msi++;
483 }
484 for_each_ofldrxq(s, ofldqidx) {
485 err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
486 adap->msix_info[msi].desc,
487 &s->ofldrxq[ofldqidx].rspq);
488 if (err)
489 goto unwind;
490 msi++;
491 }
492 for_each_rdmarxq(s, rdmaqidx) {
493 err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
494 adap->msix_info[msi].desc,
495 &s->rdmarxq[rdmaqidx].rspq);
496 if (err)
497 goto unwind;
498 msi++;
499 }
500 return 0;
501
502unwind:
503 while (--rdmaqidx >= 0)
504 free_irq(adap->msix_info[--msi].vec,
505 &s->rdmarxq[rdmaqidx].rspq);
506 while (--ofldqidx >= 0)
507 free_irq(adap->msix_info[--msi].vec,
508 &s->ofldrxq[ofldqidx].rspq);
509 while (--ethqidx >= 0)
510 free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq);
511 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
512 return err;
513}
514
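/*
 * Release the MSI-X vectors acquired by request_msix_queue_irqs().
 */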
515static void free_msix_queue_irqs(struct adapter *adap)
516{
517 int i, msi = 2;
518 struct sge *s = &adap->sge;
519
520 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
521 for_each_ethrxq(s, i)
522 free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq);
523 for_each_ofldrxq(s, i)
524 free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq);
525 for_each_rdmarxq(s, i)
526 free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq);
527}
528
529/**
530 * setup_rss - configure RSS
531 * @adap: the adapter
532 *
533 * Sets up RSS to distribute packets to multiple receive queues. We
534 * configure the RSS CPU lookup table to distribute to the number of HW
535 * receive queues, and the response queue lookup table to narrow that
536 * down to the response queues actually configured for each port.
537 * We always configure the RSS mapping for all ports since the mapping
538 * table has plenty of entries.
539 */
540static int setup_rss(struct adapter *adap)
541{
542 int i, j, err;
543 u16 rss[MAX_ETH_QSETS];
544
545 for_each_port(adap, i) {
546 const struct port_info *pi = adap2pinfo(adap, i);
547 const struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
548
549 for (j = 0; j < pi->nqsets; j++)
550 rss[j] = q[j].rspq.abs_id;
551
552 err = t4_config_rss_range(adap, 0, pi->viid, 0, pi->rss_size,
553 rss, pi->nqsets);
554 if (err)
555 return err;
556 }
557 return 0;
558}
559
560/*
561 * Wait until all NAPI handlers are descheduled.
562 */
563static void quiesce_rx(struct adapter *adap)
564{
565 int i;
566
567 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
568 struct sge_rspq *q = adap->sge.ingr_map[i];
569
570 if (q && q->handler)
571 napi_disable(&q->napi);
572 }
573}
574
575/*
576 * Enable NAPI scheduling and interrupt generation for all Rx queues.
577 */
578static void enable_rx(struct adapter *adap)
579{
580 int i;
581
582 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
583 struct sge_rspq *q = adap->sge.ingr_map[i];
584
585 if (!q)
586 continue;
587 if (q->handler)
588 napi_enable(&q->napi);
589 /* 0-increment GTS to start the timer and enable interrupts */
590 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
591 SEINTARM(q->intr_params) |
592 INGRESSQID(q->cntxt_id));
593 }
594}
595
596/**
597 * setup_sge_queues - configure SGE Tx/Rx/response queues
598 * @adap: the adapter
599 *
600 * Determines how many sets of SGE queues to use and initializes them.
601 * We support multiple queue sets per port if we have MSI-X, otherwise
602 * just one queue set per port.
603 */
604static int setup_sge_queues(struct adapter *adap)
605{
606 int err, msi_idx, i, j;
607 struct sge *s = &adap->sge;
608
609 bitmap_zero(s->starving_fl, MAX_EGRQ);
610 bitmap_zero(s->txq_maperr, MAX_EGRQ);
611
612 if (adap->flags & USING_MSIX)
613 msi_idx = 1; /* vector 0 is for non-queue interrupts */
614 else {
615 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
616 NULL, NULL);
617 if (err)
618 return err;
619 msi_idx = -((int)s->intrq.abs_id + 1);
620 }
621
622 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
623 msi_idx, NULL, fwevtq_handler);
624 if (err) {
625freeout: t4_free_sge_resources(adap);
626 return err;
627 }
628
629 for_each_port(adap, i) {
630 struct net_device *dev = adap->port[i];
631 struct port_info *pi = netdev_priv(dev);
632 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
633 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
634
635 for (j = 0; j < pi->nqsets; j++, q++) {
636 if (msi_idx > 0)
637 msi_idx++;
638 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
639 msi_idx, &q->fl,
640 t4_ethrx_handler);
641 if (err)
642 goto freeout;
643 q->rspq.idx = j;
644 memset(&q->stats, 0, sizeof(q->stats));
645 }
646 for (j = 0; j < pi->nqsets; j++, t++) {
647 err = t4_sge_alloc_eth_txq(adap, t, dev,
648 netdev_get_tx_queue(dev, j),
649 s->fw_evtq.cntxt_id);
650 if (err)
651 goto freeout;
652 }
653 }
654
655 j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
656 for_each_ofldrxq(s, i) {
657 struct sge_ofld_rxq *q = &s->ofldrxq[i];
658 struct net_device *dev = adap->port[i / j];
659
660 if (msi_idx > 0)
661 msi_idx++;
662 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
663 &q->fl, uldrx_handler);
664 if (err)
665 goto freeout;
666 memset(&q->stats, 0, sizeof(q->stats));
667 s->ofld_rxq[i] = q->rspq.abs_id;
668 err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
669 s->fw_evtq.cntxt_id);
670 if (err)
671 goto freeout;
672 }
673
674 for_each_rdmarxq(s, i) {
675 struct sge_ofld_rxq *q = &s->rdmarxq[i];
676
677 if (msi_idx > 0)
678 msi_idx++;
679 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
680 msi_idx, &q->fl, uldrx_handler);
681 if (err)
682 goto freeout;
683 memset(&q->stats, 0, sizeof(q->stats));
684 s->rdma_rxq[i] = q->rspq.abs_id;
685 }
686
687 for_each_port(adap, i) {
688 /*
689 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
690 * have RDMA queues, and that's the right value.
691 */
692 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
693 s->fw_evtq.cntxt_id,
694 s->rdmarxq[i].rspq.cntxt_id);
695 if (err)
696 goto freeout;
697 }
698
699 t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
700 RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
701 QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
702 return 0;
703}
704
705/*
706 * Returns 0 if new FW was successfully loaded, a positive errno if a load was
707 * started but failed, and a negative errno if flash load couldn't start.
708 */
709static int upgrade_fw(struct adapter *adap)
710{
711 int ret;
712 u32 vers;
713 const struct fw_hdr *hdr;
714 const struct firmware *fw;
715 struct device *dev = adap->pdev_dev;
716
717 ret = request_firmware(&fw, FW_FNAME, dev);
718 if (ret < 0) {
719 dev_err(dev, "unable to load firmware image " FW_FNAME
720 ", error %d\n", ret);
721 return ret;
722 }
723
724 hdr = (const struct fw_hdr *)fw->data;
725 vers = ntohl(hdr->fw_ver);
726 if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
727 ret = -EINVAL; /* wrong major version, won't do */
728 goto out;
729 }
730
731 /*
732 * If the flash FW is unusable or we found something newer, load it.
733 */
734 if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
735 vers > adap->params.fw_vers) {
736 ret = -t4_load_fw(adap, fw->data, fw->size);
737 if (!ret)
738 dev_info(dev, "firmware upgraded to version %pI4 from "
739 FW_FNAME "\n", &hdr->fw_ver);
740 }
741out: release_firmware(fw);
742 return ret;
743}
744
745/*
746 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
747 * The allocated memory is cleared.
748 */
749void *t4_alloc_mem(size_t size)
750{
751 void *p = kmalloc(size, GFP_KERNEL);
752
753 if (!p)
754 p = vmalloc(size);
755 if (p)
756 memset(p, 0, size);
757 return p;
758}
759
760/*
 761 * Free memory allocated through t4_alloc_mem().
762 */
763void t4_free_mem(void *addr)
764{
765 if (is_vmalloc_addr(addr))
766 vfree(addr);
767 else
768 kfree(addr);
769}
770
771static inline int is_offload(const struct adapter *adap)
772{
773 return adap->params.offload;
774}
775
776/*
777 * Implementation of ethtool operations.
778 */
779
780static u32 get_msglevel(struct net_device *dev)
781{
782 return netdev2adap(dev)->msg_enable;
783}
784
785static void set_msglevel(struct net_device *dev, u32 val)
786{
787 netdev2adap(dev)->msg_enable = val;
788}
789
790static char stats_strings[][ETH_GSTRING_LEN] = {
791 "TxOctetsOK ",
792 "TxFramesOK ",
793 "TxBroadcastFrames ",
794 "TxMulticastFrames ",
795 "TxUnicastFrames ",
796 "TxErrorFrames ",
797
798 "TxFrames64 ",
799 "TxFrames65To127 ",
800 "TxFrames128To255 ",
801 "TxFrames256To511 ",
802 "TxFrames512To1023 ",
803 "TxFrames1024To1518 ",
804 "TxFrames1519ToMax ",
805
806 "TxFramesDropped ",
807 "TxPauseFrames ",
808 "TxPPP0Frames ",
809 "TxPPP1Frames ",
810 "TxPPP2Frames ",
811 "TxPPP3Frames ",
812 "TxPPP4Frames ",
813 "TxPPP5Frames ",
814 "TxPPP6Frames ",
815 "TxPPP7Frames ",
816
817 "RxOctetsOK ",
818 "RxFramesOK ",
819 "RxBroadcastFrames ",
820 "RxMulticastFrames ",
821 "RxUnicastFrames ",
822
823 "RxFramesTooLong ",
824 "RxJabberErrors ",
825 "RxFCSErrors ",
826 "RxLengthErrors ",
827 "RxSymbolErrors ",
828 "RxRuntFrames ",
829
830 "RxFrames64 ",
831 "RxFrames65To127 ",
832 "RxFrames128To255 ",
833 "RxFrames256To511 ",
834 "RxFrames512To1023 ",
835 "RxFrames1024To1518 ",
836 "RxFrames1519ToMax ",
837
838 "RxPauseFrames ",
839 "RxPPP0Frames ",
840 "RxPPP1Frames ",
841 "RxPPP2Frames ",
842 "RxPPP3Frames ",
843 "RxPPP4Frames ",
844 "RxPPP5Frames ",
845 "RxPPP6Frames ",
846 "RxPPP7Frames ",
847
848 "RxBG0FramesDropped ",
849 "RxBG1FramesDropped ",
850 "RxBG2FramesDropped ",
851 "RxBG3FramesDropped ",
852 "RxBG0FramesTrunc ",
853 "RxBG1FramesTrunc ",
854 "RxBG2FramesTrunc ",
855 "RxBG3FramesTrunc ",
856
857 "TSO ",
858 "TxCsumOffload ",
859 "RxCsumGood ",
860 "VLANextractions ",
861 "VLANinsertions ",
 862 "GROpackets ",
863 "GROmerged ",
 864};
865
866static int get_sset_count(struct net_device *dev, int sset)
867{
868 switch (sset) {
869 case ETH_SS_STATS:
870 return ARRAY_SIZE(stats_strings);
871 default:
872 return -EOPNOTSUPP;
873 }
874}
875
876#define T4_REGMAP_SIZE (160 * 1024)
877
878static int get_regs_len(struct net_device *dev)
879{
880 return T4_REGMAP_SIZE;
881}
882
883static int get_eeprom_len(struct net_device *dev)
884{
885 return EEPROMSIZE;
886}
887
888static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
889{
890 struct adapter *adapter = netdev2adap(dev);
891
892 strcpy(info->driver, KBUILD_MODNAME);
893 strcpy(info->version, DRV_VERSION);
894 strcpy(info->bus_info, pci_name(adapter->pdev));
895
896 if (!adapter->params.fw_vers)
897 strcpy(info->fw_version, "N/A");
898 else
899 snprintf(info->fw_version, sizeof(info->fw_version),
900 "%u.%u.%u.%u, TP %u.%u.%u.%u",
901 FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
902 FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
903 FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
904 FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
905 FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
906 FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
907 FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
908 FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
909}
910
911static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
912{
913 if (stringset == ETH_SS_STATS)
914 memcpy(data, stats_strings, sizeof(stats_strings));
915}
916
917/*
918 * port stats maintained per queue of the port. They should be in the same
919 * order as in stats_strings above.
920 */
921struct queue_port_stats {
922 u64 tso;
923 u64 tx_csum;
924 u64 rx_csum;
925 u64 vlan_ex;
926 u64 vlan_ins;
 927 u64 gro_pkts;
928 u64 gro_merged;
 929};
930
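/*
 * Accumulate the per-queue SGE statistics of a port's queue sets into a
 * single queue_port_stats structure.
 */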
931static void collect_sge_port_stats(const struct adapter *adap,
932 const struct port_info *p, struct queue_port_stats *s)
933{
934 int i;
935 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
936 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
937
938 memset(s, 0, sizeof(*s));
939 for (i = 0; i < p->nqsets; i++, rx++, tx++) {
940 s->tso += tx->tso;
941 s->tx_csum += tx->tx_cso;
942 s->rx_csum += rx->stats.rx_cso;
943 s->vlan_ex += rx->stats.vlan_ex;
944 s->vlan_ins += tx->vlan_ins;
 945 s->gro_pkts += rx->stats.lro_pkts;
946 s->gro_merged += rx->stats.lro_merged;
 947 }
948}
949
950static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
951 u64 *data)
952{
953 struct port_info *pi = netdev_priv(dev);
954 struct adapter *adapter = pi->adapter;
955
956 t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
957
958 data += sizeof(struct port_stats) / sizeof(u64);
959 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
960}
961
962/*
963 * Return a version number to identify the type of adapter. The scheme is:
964 * - bits 0..9: chip version
965 * - bits 10..15: chip revision
966 */
967static inline unsigned int mk_adap_vers(const struct adapter *ap)
968{
969 return 4 | (ap->params.rev << 10);
970}
971
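/*
 * Read the contiguous register range [start, end] into the buffer at the
 * offset corresponding to the start address.
 */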
972static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
973 unsigned int end)
974{
975 u32 *p = buf + start;
976
977 for ( ; start <= end; start += sizeof(u32))
978 *p++ = t4_read_reg(ap, start);
979}
980
981static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
982 void *buf)
983{
984 static const unsigned int reg_ranges[] = {
985 0x1008, 0x1108,
986 0x1180, 0x11b4,
987 0x11fc, 0x123c,
988 0x1300, 0x173c,
989 0x1800, 0x18fc,
990 0x3000, 0x30d8,
991 0x30e0, 0x5924,
992 0x5960, 0x59d4,
993 0x5a00, 0x5af8,
994 0x6000, 0x6098,
995 0x6100, 0x6150,
996 0x6200, 0x6208,
997 0x6240, 0x6248,
998 0x6280, 0x6338,
999 0x6370, 0x638c,
1000 0x6400, 0x643c,
1001 0x6500, 0x6524,
1002 0x6a00, 0x6a38,
1003 0x6a60, 0x6a78,
1004 0x6b00, 0x6b84,
1005 0x6bf0, 0x6c84,
1006 0x6cf0, 0x6d84,
1007 0x6df0, 0x6e84,
1008 0x6ef0, 0x6f84,
1009 0x6ff0, 0x7084,
1010 0x70f0, 0x7184,
1011 0x71f0, 0x7284,
1012 0x72f0, 0x7384,
1013 0x73f0, 0x7450,
1014 0x7500, 0x7530,
1015 0x7600, 0x761c,
1016 0x7680, 0x76cc,
1017 0x7700, 0x7798,
1018 0x77c0, 0x77fc,
1019 0x7900, 0x79fc,
1020 0x7b00, 0x7c38,
1021 0x7d00, 0x7efc,
1022 0x8dc0, 0x8e1c,
1023 0x8e30, 0x8e78,
1024 0x8ea0, 0x8f6c,
1025 0x8fc0, 0x9074,
1026 0x90fc, 0x90fc,
1027 0x9400, 0x9458,
1028 0x9600, 0x96bc,
1029 0x9800, 0x9808,
1030 0x9820, 0x983c,
1031 0x9850, 0x9864,
1032 0x9c00, 0x9c6c,
1033 0x9c80, 0x9cec,
1034 0x9d00, 0x9d6c,
1035 0x9d80, 0x9dec,
1036 0x9e00, 0x9e6c,
1037 0x9e80, 0x9eec,
1038 0x9f00, 0x9f6c,
1039 0x9f80, 0x9fec,
1040 0xd004, 0xd03c,
1041 0xdfc0, 0xdfe0,
1042 0xe000, 0xea7c,
1043 0xf000, 0x11190,
1044 0x19040, 0x19124,
1045 0x19150, 0x191b0,
1046 0x191d0, 0x191e8,
1047 0x19238, 0x1924c,
1048 0x193f8, 0x19474,
1049 0x19490, 0x194f8,
1050 0x19800, 0x19f30,
1051 0x1a000, 0x1a06c,
1052 0x1a0b0, 0x1a120,
1053 0x1a128, 0x1a138,
1054 0x1a190, 0x1a1c4,
1055 0x1a1fc, 0x1a1fc,
1056 0x1e040, 0x1e04c,
1057 0x1e240, 0x1e28c,
1058 0x1e2c0, 0x1e2c0,
1059 0x1e2e0, 0x1e2e0,
1060 0x1e300, 0x1e384,
1061 0x1e3c0, 0x1e3c8,
1062 0x1e440, 0x1e44c,
1063 0x1e640, 0x1e68c,
1064 0x1e6c0, 0x1e6c0,
1065 0x1e6e0, 0x1e6e0,
1066 0x1e700, 0x1e784,
1067 0x1e7c0, 0x1e7c8,
1068 0x1e840, 0x1e84c,
1069 0x1ea40, 0x1ea8c,
1070 0x1eac0, 0x1eac0,
1071 0x1eae0, 0x1eae0,
1072 0x1eb00, 0x1eb84,
1073 0x1ebc0, 0x1ebc8,
1074 0x1ec40, 0x1ec4c,
1075 0x1ee40, 0x1ee8c,
1076 0x1eec0, 0x1eec0,
1077 0x1eee0, 0x1eee0,
1078 0x1ef00, 0x1ef84,
1079 0x1efc0, 0x1efc8,
1080 0x1f040, 0x1f04c,
1081 0x1f240, 0x1f28c,
1082 0x1f2c0, 0x1f2c0,
1083 0x1f2e0, 0x1f2e0,
1084 0x1f300, 0x1f384,
1085 0x1f3c0, 0x1f3c8,
1086 0x1f440, 0x1f44c,
1087 0x1f640, 0x1f68c,
1088 0x1f6c0, 0x1f6c0,
1089 0x1f6e0, 0x1f6e0,
1090 0x1f700, 0x1f784,
1091 0x1f7c0, 0x1f7c8,
1092 0x1f840, 0x1f84c,
1093 0x1fa40, 0x1fa8c,
1094 0x1fac0, 0x1fac0,
1095 0x1fae0, 0x1fae0,
1096 0x1fb00, 0x1fb84,
1097 0x1fbc0, 0x1fbc8,
1098 0x1fc40, 0x1fc4c,
1099 0x1fe40, 0x1fe8c,
1100 0x1fec0, 0x1fec0,
1101 0x1fee0, 0x1fee0,
1102 0x1ff00, 0x1ff84,
1103 0x1ffc0, 0x1ffc8,
1104 0x20000, 0x2002c,
1105 0x20100, 0x2013c,
1106 0x20190, 0x201c8,
1107 0x20200, 0x20318,
1108 0x20400, 0x20528,
1109 0x20540, 0x20614,
1110 0x21000, 0x21040,
1111 0x2104c, 0x21060,
1112 0x210c0, 0x210ec,
1113 0x21200, 0x21268,
1114 0x21270, 0x21284,
1115 0x212fc, 0x21388,
1116 0x21400, 0x21404,
1117 0x21500, 0x21518,
1118 0x2152c, 0x2153c,
1119 0x21550, 0x21554,
1120 0x21600, 0x21600,
1121 0x21608, 0x21628,
1122 0x21630, 0x2163c,
1123 0x21700, 0x2171c,
1124 0x21780, 0x2178c,
1125 0x21800, 0x21c38,
1126 0x21c80, 0x21d7c,
1127 0x21e00, 0x21e04,
1128 0x22000, 0x2202c,
1129 0x22100, 0x2213c,
1130 0x22190, 0x221c8,
1131 0x22200, 0x22318,
1132 0x22400, 0x22528,
1133 0x22540, 0x22614,
1134 0x23000, 0x23040,
1135 0x2304c, 0x23060,
1136 0x230c0, 0x230ec,
1137 0x23200, 0x23268,
1138 0x23270, 0x23284,
1139 0x232fc, 0x23388,
1140 0x23400, 0x23404,
1141 0x23500, 0x23518,
1142 0x2352c, 0x2353c,
1143 0x23550, 0x23554,
1144 0x23600, 0x23600,
1145 0x23608, 0x23628,
1146 0x23630, 0x2363c,
1147 0x23700, 0x2371c,
1148 0x23780, 0x2378c,
1149 0x23800, 0x23c38,
1150 0x23c80, 0x23d7c,
1151 0x23e00, 0x23e04,
1152 0x24000, 0x2402c,
1153 0x24100, 0x2413c,
1154 0x24190, 0x241c8,
1155 0x24200, 0x24318,
1156 0x24400, 0x24528,
1157 0x24540, 0x24614,
1158 0x25000, 0x25040,
1159 0x2504c, 0x25060,
1160 0x250c0, 0x250ec,
1161 0x25200, 0x25268,
1162 0x25270, 0x25284,
1163 0x252fc, 0x25388,
1164 0x25400, 0x25404,
1165 0x25500, 0x25518,
1166 0x2552c, 0x2553c,
1167 0x25550, 0x25554,
1168 0x25600, 0x25600,
1169 0x25608, 0x25628,
1170 0x25630, 0x2563c,
1171 0x25700, 0x2571c,
1172 0x25780, 0x2578c,
1173 0x25800, 0x25c38,
1174 0x25c80, 0x25d7c,
1175 0x25e00, 0x25e04,
1176 0x26000, 0x2602c,
1177 0x26100, 0x2613c,
1178 0x26190, 0x261c8,
1179 0x26200, 0x26318,
1180 0x26400, 0x26528,
1181 0x26540, 0x26614,
1182 0x27000, 0x27040,
1183 0x2704c, 0x27060,
1184 0x270c0, 0x270ec,
1185 0x27200, 0x27268,
1186 0x27270, 0x27284,
1187 0x272fc, 0x27388,
1188 0x27400, 0x27404,
1189 0x27500, 0x27518,
1190 0x2752c, 0x2753c,
1191 0x27550, 0x27554,
1192 0x27600, 0x27600,
1193 0x27608, 0x27628,
1194 0x27630, 0x2763c,
1195 0x27700, 0x2771c,
1196 0x27780, 0x2778c,
1197 0x27800, 0x27c38,
1198 0x27c80, 0x27d7c,
1199 0x27e00, 0x27e04
1200 };
1201
1202 int i;
1203 struct adapter *ap = netdev2adap(dev);
1204
1205 regs->version = mk_adap_vers(ap);
1206
1207 memset(buf, 0, T4_REGMAP_SIZE);
1208 for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
1209 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
1210}
1211
1212static int restart_autoneg(struct net_device *dev)
1213{
1214 struct port_info *p = netdev_priv(dev);
1215
1216 if (!netif_running(dev))
1217 return -EAGAIN;
1218 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
1219 return -EINVAL;
1220 t4_restart_aneg(p->adapter, 0, p->tx_chan);
1221 return 0;
1222}
1223
1224static int identify_port(struct net_device *dev, u32 data)
1225{
1226 if (data == 0)
1227 data = 2; /* default to 2 seconds */
1228
1229 return t4_identify_port(netdev2adap(dev), 0, netdev2pinfo(dev)->viid,
1230 data * 5);
1231}
1232
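/*
 * Translate a firmware port type and link capability mask into the
 * corresponding ethtool SUPPORTED_* flags.
 */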
1233static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
1234{
1235 unsigned int v = 0;
1236
1237 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XAUI) {
1238 v |= SUPPORTED_TP;
1239 if (caps & FW_PORT_CAP_SPEED_100M)
1240 v |= SUPPORTED_100baseT_Full;
1241 if (caps & FW_PORT_CAP_SPEED_1G)
1242 v |= SUPPORTED_1000baseT_Full;
1243 if (caps & FW_PORT_CAP_SPEED_10G)
1244 v |= SUPPORTED_10000baseT_Full;
1245 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
1246 v |= SUPPORTED_Backplane;
1247 if (caps & FW_PORT_CAP_SPEED_1G)
1248 v |= SUPPORTED_1000baseKX_Full;
1249 if (caps & FW_PORT_CAP_SPEED_10G)
1250 v |= SUPPORTED_10000baseKX4_Full;
1251 } else if (type == FW_PORT_TYPE_KR)
1252 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
1253 else if (type == FW_PORT_TYPE_FIBER)
1254 v |= SUPPORTED_FIBRE;
1255
1256 if (caps & FW_PORT_CAP_ANEG)
1257 v |= SUPPORTED_Autoneg;
1258 return v;
1259}
1260
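/*
 * Translate ethtool ADVERTISED_* speed flags into firmware link capabilities.
 */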
1261static unsigned int to_fw_linkcaps(unsigned int caps)
1262{
1263 unsigned int v = 0;
1264
1265 if (caps & ADVERTISED_100baseT_Full)
1266 v |= FW_PORT_CAP_SPEED_100M;
1267 if (caps & ADVERTISED_1000baseT_Full)
1268 v |= FW_PORT_CAP_SPEED_1G;
1269 if (caps & ADVERTISED_10000baseT_Full)
1270 v |= FW_PORT_CAP_SPEED_10G;
1271 return v;
1272}
1273
1274static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1275{
1276 const struct port_info *p = netdev_priv(dev);
1277
1278 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
1279 p->port_type == FW_PORT_TYPE_BT_XAUI)
1280 cmd->port = PORT_TP;
1281 else if (p->port_type == FW_PORT_TYPE_FIBER)
1282 cmd->port = PORT_FIBRE;
1283 else if (p->port_type == FW_PORT_TYPE_TWINAX)
1284 cmd->port = PORT_DA;
1285 else
1286 cmd->port = PORT_OTHER;
1287
1288 if (p->mdio_addr >= 0) {
1289 cmd->phy_address = p->mdio_addr;
1290 cmd->transceiver = XCVR_EXTERNAL;
1291 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
1292 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
1293 } else {
1294 cmd->phy_address = 0; /* not really, but no better option */
1295 cmd->transceiver = XCVR_INTERNAL;
1296 cmd->mdio_support = 0;
1297 }
1298
1299 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
1300 cmd->advertising = from_fw_linkcaps(p->port_type,
1301 p->link_cfg.advertising);
1302 cmd->speed = netif_carrier_ok(dev) ? p->link_cfg.speed : 0;
1303 cmd->duplex = DUPLEX_FULL;
1304 cmd->autoneg = p->link_cfg.autoneg;
1305 cmd->maxtxpkt = 0;
1306 cmd->maxrxpkt = 0;
1307 return 0;
1308}
1309
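/*
 * Map an ethtool speed value to the corresponding firmware speed capability.
 */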
1310static unsigned int speed_to_caps(int speed)
1311{
1312 if (speed == SPEED_100)
1313 return FW_PORT_CAP_SPEED_100M;
1314 if (speed == SPEED_1000)
1315 return FW_PORT_CAP_SPEED_1G;
1316 if (speed == SPEED_10000)
1317 return FW_PORT_CAP_SPEED_10G;
1318 return 0;
1319}
1320
1321static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1322{
1323 unsigned int cap;
1324 struct port_info *p = netdev_priv(dev);
1325 struct link_config *lc = &p->link_cfg;
1326
1327 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
1328 return -EINVAL;
1329
1330 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1331 /*
1332 * PHY offers a single speed. See if that's what's
1333 * being requested.
1334 */
1335 if (cmd->autoneg == AUTONEG_DISABLE &&
1336 (lc->supported & speed_to_caps(cmd->speed)))
1337 return 0;
1338 return -EINVAL;
1339 }
1340
1341 if (cmd->autoneg == AUTONEG_DISABLE) {
1342 cap = speed_to_caps(cmd->speed);
1343
1344 if (!(lc->supported & cap) || cmd->speed == SPEED_1000 ||
1345 cmd->speed == SPEED_10000)
1346 return -EINVAL;
1347 lc->requested_speed = cap;
1348 lc->advertising = 0;
1349 } else {
1350 cap = to_fw_linkcaps(cmd->advertising);
1351 if (!(lc->supported & cap))
1352 return -EINVAL;
1353 lc->requested_speed = 0;
1354 lc->advertising = cap | FW_PORT_CAP_ANEG;
1355 }
1356 lc->autoneg = cmd->autoneg;
1357
1358 if (netif_running(dev))
1359 return t4_link_start(p->adapter, 0, p->tx_chan, lc);
1360 return 0;
1361}
1362
1363static void get_pauseparam(struct net_device *dev,
1364 struct ethtool_pauseparam *epause)
1365{
1366 struct port_info *p = netdev_priv(dev);
1367
1368 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
1369 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
1370 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
1371}
1372
1373static int set_pauseparam(struct net_device *dev,
1374 struct ethtool_pauseparam *epause)
1375{
1376 struct port_info *p = netdev_priv(dev);
1377 struct link_config *lc = &p->link_cfg;
1378
1379 if (epause->autoneg == AUTONEG_DISABLE)
1380 lc->requested_fc = 0;
1381 else if (lc->supported & FW_PORT_CAP_ANEG)
1382 lc->requested_fc = PAUSE_AUTONEG;
1383 else
1384 return -EINVAL;
1385
1386 if (epause->rx_pause)
1387 lc->requested_fc |= PAUSE_RX;
1388 if (epause->tx_pause)
1389 lc->requested_fc |= PAUSE_TX;
1390 if (netif_running(dev))
1391 return t4_link_start(p->adapter, 0, p->tx_chan, lc);
1392 return 0;
1393}
1394
1395static u32 get_rx_csum(struct net_device *dev)
1396{
1397 struct port_info *p = netdev_priv(dev);
1398
1399 return p->rx_offload & RX_CSO;
1400}
1401
1402static int set_rx_csum(struct net_device *dev, u32 data)
1403{
1404 struct port_info *p = netdev_priv(dev);
1405
1406 if (data)
1407 p->rx_offload |= RX_CSO;
1408 else
1409 p->rx_offload &= ~RX_CSO;
1410 return 0;
1411}
1412
1413static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1414{
1415 const struct port_info *pi = netdev_priv(dev);
1416 const struct sge *s = &pi->adapter->sge;
1417
1418 e->rx_max_pending = MAX_RX_BUFFERS;
1419 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
1420 e->rx_jumbo_max_pending = 0;
1421 e->tx_max_pending = MAX_TXQ_ENTRIES;
1422
1423 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
1424 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
1425 e->rx_jumbo_pending = 0;
1426 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
1427}
1428
1429static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1430{
1431 int i;
1432 const struct port_info *pi = netdev_priv(dev);
1433 struct adapter *adapter = pi->adapter;
1434 struct sge *s = &adapter->sge;
1435
1436 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
1437 e->tx_pending > MAX_TXQ_ENTRIES ||
1438 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1439 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1440 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
1441 return -EINVAL;
1442
1443 if (adapter->flags & FULL_INIT_DONE)
1444 return -EBUSY;
1445
1446 for (i = 0; i < pi->nqsets; ++i) {
1447 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
1448 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
1449 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
1450 }
1451 return 0;
1452}
1453
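/*
 * Return the index of the SGE hold-off timer value closest to the given time.
 */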
1454static int closest_timer(const struct sge *s, int time)
1455{
1456 int i, delta, match = 0, min_delta = INT_MAX;
1457
1458 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
1459 delta = time - s->timer_val[i];
1460 if (delta < 0)
1461 delta = -delta;
1462 if (delta < min_delta) {
1463 min_delta = delta;
1464 match = i;
1465 }
1466 }
1467 return match;
1468}
1469
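/*
 * Return the index of the SGE interrupt packet-count threshold closest to
 * the given value.
 */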
1470static int closest_thres(const struct sge *s, int thres)
1471{
1472 int i, delta, match = 0, min_delta = INT_MAX;
1473
1474 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
1475 delta = thres - s->counter_val[i];
1476 if (delta < 0)
1477 delta = -delta;
1478 if (delta < min_delta) {
1479 min_delta = delta;
1480 match = i;
1481 }
1482 }
1483 return match;
1484}
1485
1486/*
1487 * Return a queue's interrupt hold-off time in us. 0 means no timer.
1488 */
1489static unsigned int qtimer_val(const struct adapter *adap,
1490 const struct sge_rspq *q)
1491{
1492 unsigned int idx = q->intr_params >> 1;
1493
1494 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
1495}
1496
1497/**
1498 * set_rxq_intr_params - set a queue's interrupt holdoff parameters
1499 * @adap: the adapter
1500 * @q: the Rx queue
1501 * @us: the hold-off time in us, or 0 to disable timer
1502 * @cnt: the hold-off packet count, or 0 to disable counter
1503 *
1504 * Sets an Rx queue's interrupt hold-off time and packet count. At least
1505 * one of the two needs to be enabled for the queue to generate interrupts.
1506 */
1507static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
1508 unsigned int us, unsigned int cnt)
1509{
1510 if ((us | cnt) == 0)
1511 cnt = 1;
1512
1513 if (cnt) {
1514 int err;
1515 u32 v, new_idx;
1516
1517 new_idx = closest_thres(&adap->sge, cnt);
1518 if (q->desc && q->pktcnt_idx != new_idx) {
1519 /* the queue has already been created, update it */
1520 v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
1521 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1522 FW_PARAMS_PARAM_YZ(q->cntxt_id);
1523 err = t4_set_params(adap, 0, 0, 0, 1, &v, &new_idx);
1524 if (err)
1525 return err;
1526 }
1527 q->pktcnt_idx = new_idx;
1528 }
1529
1530 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
1531 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
1532 return 0;
1533}
1534
1535static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1536{
1537 const struct port_info *pi = netdev_priv(dev);
1538 struct adapter *adap = pi->adapter;
1539
1540 return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
1541 c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
1542}
1543
1544static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1545{
1546 const struct port_info *pi = netdev_priv(dev);
1547 const struct adapter *adap = pi->adapter;
1548 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
1549
1550 c->rx_coalesce_usecs = qtimer_val(adap, rq);
1551 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
1552 adap->sge.counter_val[rq->pktcnt_idx] : 0;
1553 return 0;
1554}
1555
1556/*
1557 * Translate a physical EEPROM address to virtual. The first 1K is accessed
1558 * through virtual addresses starting at 31K, the rest is accessed through
1559 * virtual addresses starting at 0. This mapping is correct only for PF0.
1560 */
1561static int eeprom_ptov(unsigned int phys_addr)
1562{
1563 if (phys_addr < 1024)
1564 return phys_addr + (31 << 10);
1565 if (phys_addr < EEPROMSIZE)
1566 return phys_addr - 1024;
1567 return -EINVAL;
1568}
1569
1570/*
1571 * The next two routines implement eeprom read/write from physical addresses.
1572 * The physical->virtual translation is correct only for PF0.
1573 */
1574static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
1575{
1576 int vaddr = eeprom_ptov(phys_addr);
1577
1578 if (vaddr >= 0)
1579 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
1580 return vaddr < 0 ? vaddr : 0;
1581}
1582
1583static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
1584{
1585 int vaddr = eeprom_ptov(phys_addr);
1586
1587 if (vaddr >= 0)
1588 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
1589 return vaddr < 0 ? vaddr : 0;
1590}
1591
1592#define EEPROM_MAGIC 0x38E2F10C
1593
1594static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1595 u8 *data)
1596{
1597 int i, err = 0;
1598 struct adapter *adapter = netdev2adap(dev);
1599
1600 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1601 if (!buf)
1602 return -ENOMEM;
1603
1604 e->magic = EEPROM_MAGIC;
1605 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1606 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
1607
1608 if (!err)
1609 memcpy(data, buf + e->offset, e->len);
1610 kfree(buf);
1611 return err;
1612}
1613
1614static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1615 u8 *data)
1616{
1617 u8 *buf;
1618 int err = 0;
1619 u32 aligned_offset, aligned_len, *p;
1620 struct adapter *adapter = netdev2adap(dev);
1621
1622 if (eeprom->magic != EEPROM_MAGIC)
1623 return -EINVAL;
1624
1625 aligned_offset = eeprom->offset & ~3;
1626 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1627
1628 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1629 /*
1630 * RMW possibly needed for first or last words.
1631 */
1632 buf = kmalloc(aligned_len, GFP_KERNEL);
1633 if (!buf)
1634 return -ENOMEM;
1635 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
1636 if (!err && aligned_len > 4)
1637 err = eeprom_rd_phys(adapter,
1638 aligned_offset + aligned_len - 4,
1639 (u32 *)&buf[aligned_len - 4]);
1640 if (err)
1641 goto out;
1642 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1643 } else
1644 buf = data;
1645
1646 err = t4_seeprom_wp(adapter, false);
1647 if (err)
1648 goto out;
1649
1650 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
1651 err = eeprom_wr_phys(adapter, aligned_offset, *p);
1652 aligned_offset += 4;
1653 }
1654
1655 if (!err)
1656 err = t4_seeprom_wp(adapter, true);
1657out:
1658 if (buf != data)
1659 kfree(buf);
1660 return err;
1661}
1662
1663static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
1664{
1665 int ret;
1666 const struct firmware *fw;
1667 struct adapter *adap = netdev2adap(netdev);
1668
1669 ef->data[sizeof(ef->data) - 1] = '\0';
1670 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
1671 if (ret < 0)
1672 return ret;
1673
1674 ret = t4_load_fw(adap, fw->data, fw->size);
1675 release_firmware(fw);
1676 if (!ret)
1677 dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
1678 return ret;
1679}
1680
1681#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
1682#define BCAST_CRC 0xa0ccc1a6
1683
1684static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1685{
1686 wol->supported = WAKE_BCAST | WAKE_MAGIC;
1687 wol->wolopts = netdev2adap(dev)->wol;
1688 memset(&wol->sopass, 0, sizeof(wol->sopass));
1689}
1690
1691static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1692{
1693 int err = 0;
1694 struct port_info *pi = netdev_priv(dev);
1695
1696 if (wol->wolopts & ~WOL_SUPPORTED)
1697 return -EINVAL;
1698 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
1699 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
1700 if (wol->wolopts & WAKE_BCAST) {
1701 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
1702 ~0ULL, 0, false);
1703 if (!err)
1704 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
1705 ~6ULL, ~0ULL, BCAST_CRC, true);
1706 } else
1707 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
1708 return err;
1709}
1710
1711static int set_tso(struct net_device *dev, u32 value)
1712{
1713 if (value)
1714 dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
1715 else
1716 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
1717 return 0;
1718}
1719
 1720static int set_flags(struct net_device *dev, u32 flags)
1721{
1722 if (flags & ~ETH_FLAG_RXHASH)
1723 return -EOPNOTSUPP;
1724
1725 if (flags & ETH_FLAG_RXHASH)
1726 dev->features |= NETIF_F_RXHASH;
1727 else
1728 dev->features &= ~NETIF_F_RXHASH;
1729 return 0;
1730}
1731
 1732static struct ethtool_ops cxgb_ethtool_ops = {
1733 .get_settings = get_settings,
1734 .set_settings = set_settings,
1735 .get_drvinfo = get_drvinfo,
1736 .get_msglevel = get_msglevel,
1737 .set_msglevel = set_msglevel,
1738 .get_ringparam = get_sge_param,
1739 .set_ringparam = set_sge_param,
1740 .get_coalesce = get_coalesce,
1741 .set_coalesce = set_coalesce,
1742 .get_eeprom_len = get_eeprom_len,
1743 .get_eeprom = get_eeprom,
1744 .set_eeprom = set_eeprom,
1745 .get_pauseparam = get_pauseparam,
1746 .set_pauseparam = set_pauseparam,
1747 .get_rx_csum = get_rx_csum,
1748 .set_rx_csum = set_rx_csum,
1749 .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
1750 .set_sg = ethtool_op_set_sg,
1751 .get_link = ethtool_op_get_link,
1752 .get_strings = get_strings,
1753 .phys_id = identify_port,
1754 .nway_reset = restart_autoneg,
1755 .get_sset_count = get_sset_count,
1756 .get_ethtool_stats = get_stats,
1757 .get_regs_len = get_regs_len,
1758 .get_regs = get_regs,
1759 .get_wol = get_wol,
1760 .set_wol = set_wol,
1761 .set_tso = set_tso,
 1762 .set_flags = set_flags,
 1763 .flash_device = set_flash,
1764};
1765
1766/*
1767 * debugfs support
1768 */
1769
1770static int mem_open(struct inode *inode, struct file *file)
1771{
1772 file->private_data = inode->i_private;
1773 return 0;
1774}
1775
1776static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
1777 loff_t *ppos)
1778{
1779 loff_t pos = *ppos;
1780 loff_t avail = file->f_path.dentry->d_inode->i_size;
1781 unsigned int mem = (uintptr_t)file->private_data & 3;
1782 struct adapter *adap = file->private_data - mem;
1783
1784 if (pos < 0)
1785 return -EINVAL;
1786 if (pos >= avail)
1787 return 0;
1788 if (count > avail - pos)
1789 count = avail - pos;
1790
1791 while (count) {
1792 size_t len;
1793 int ret, ofst;
1794 __be32 data[16];
1795
1796 if (mem == MEM_MC)
1797 ret = t4_mc_read(adap, pos, data, NULL);
1798 else
1799 ret = t4_edc_read(adap, mem, pos, data, NULL);
1800 if (ret)
1801 return ret;
1802
1803 ofst = pos % sizeof(data);
1804 len = min(count, sizeof(data) - ofst);
1805 if (copy_to_user(buf, (u8 *)data + ofst, len))
1806 return -EFAULT;
1807
1808 buf += len;
1809 pos += len;
1810 count -= len;
1811 }
1812 count = pos - *ppos;
1813 *ppos = pos;
1814 return count;
1815}
1816
1817static const struct file_operations mem_debugfs_fops = {
1818 .owner = THIS_MODULE,
1819 .open = mem_open,
1820 .read = mem_read,
1821};
1822
1823static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
1824 unsigned int idx, unsigned int size_mb)
1825{
1826 struct dentry *de;
1827
1828 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
1829 (void *)adap + idx, &mem_debugfs_fops);
1830 if (de && de->d_inode)
1831 de->d_inode->i_size = size_mb << 20;
1832}
1833
1834static int __devinit setup_debugfs(struct adapter *adap)
1835{
1836 int i;
1837
1838 if (IS_ERR_OR_NULL(adap->debugfs_root))
1839 return -1;
1840
1841 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
1842 if (i & EDRAM0_ENABLE)
1843 add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
1844 if (i & EDRAM1_ENABLE)
1845 add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
1846 if (i & EXT_MEM_ENABLE)
1847 add_debugfs_mem(adap, "mc", MEM_MC,
1848 EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
1849 if (adap->l2t)
1850 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
1851 &t4_l2t_fops);
1852 return 0;
1853}
1854
1855/*
1856 * upper-layer driver support
1857 */
1858
1859/*
1860 * Allocate an active-open TID and set it to the supplied value.
1861 */
1862int cxgb4_alloc_atid(struct tid_info *t, void *data)
1863{
1864 int atid = -1;
1865
1866 spin_lock_bh(&t->atid_lock);
1867 if (t->afree) {
1868 union aopen_entry *p = t->afree;
1869
1870 atid = p - t->atid_tab;
1871 t->afree = p->next;
1872 p->data = data;
1873 t->atids_in_use++;
1874 }
1875 spin_unlock_bh(&t->atid_lock);
1876 return atid;
1877}
1878EXPORT_SYMBOL(cxgb4_alloc_atid);
1879
1880/*
1881 * Release an active-open TID.
1882 */
1883void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
1884{
1885 union aopen_entry *p = &t->atid_tab[atid];
1886
1887 spin_lock_bh(&t->atid_lock);
1888 p->next = t->afree;
1889 t->afree = p;
1890 t->atids_in_use--;
1891 spin_unlock_bh(&t->atid_lock);
1892}
1893EXPORT_SYMBOL(cxgb4_free_atid);
1894
1895/*
1896 * Allocate a server TID and set it to the supplied value.
1897 */
1898int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
1899{
1900 int stid;
1901
1902 spin_lock_bh(&t->stid_lock);
1903 if (family == PF_INET) {
1904 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
1905 if (stid < t->nstids)
1906 __set_bit(stid, t->stid_bmap);
1907 else
1908 stid = -1;
1909 } else {
1910 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
1911 if (stid < 0)
1912 stid = -1;
1913 }
1914 if (stid >= 0) {
1915 t->stid_tab[stid].data = data;
1916 stid += t->stid_base;
1917 t->stids_in_use++;
1918 }
1919 spin_unlock_bh(&t->stid_lock);
1920 return stid;
1921}
1922EXPORT_SYMBOL(cxgb4_alloc_stid);
1923
1924/*
1925 * Release a server TID.
1926 */
1927void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
1928{
1929 stid -= t->stid_base;
1930 spin_lock_bh(&t->stid_lock);
1931 if (family == PF_INET)
1932 __clear_bit(stid, t->stid_bmap);
1933 else
1934 bitmap_release_region(t->stid_bmap, stid, 2);
1935 t->stid_tab[stid].data = NULL;
1936 t->stids_in_use--;
1937 spin_unlock_bh(&t->stid_lock);
1938}
1939EXPORT_SYMBOL(cxgb4_free_stid);
1940
1941/*
1942 * Populate a TID_RELEASE WR. Caller must properly size the skb.
1943 */
1944static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
1945 unsigned int tid)
1946{
1947 struct cpl_tid_release *req;
1948
1949 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
1950 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
1951 INIT_TP_WR(req, tid);
1952 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
1953}
1954
1955/*
1956 * Queue a TID release request and if necessary schedule a work queue to
1957 * process it.
1958 */
1959void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
1960 unsigned int tid)
1961{
1962 void **p = &t->tid_tab[tid];
1963 struct adapter *adap = container_of(t, struct adapter, tids);
1964
1965 spin_lock_bh(&adap->tid_release_lock);
1966 *p = adap->tid_release_head;
1967 /* Low 2 bits encode the Tx channel number */
1968 adap->tid_release_head = (void **)((uintptr_t)p | chan);
1969 if (!adap->tid_release_task_busy) {
1970 adap->tid_release_task_busy = true;
1971 schedule_work(&adap->tid_release_task);
1972 }
1973 spin_unlock_bh(&adap->tid_release_lock);
1974}
1975EXPORT_SYMBOL(cxgb4_queue_tid_release);
1976
1977/*
1978 * Process the list of pending TID release requests.
1979 */
1980static void process_tid_release_list(struct work_struct *work)
1981{
1982 struct sk_buff *skb;
1983 struct adapter *adap;
1984
1985 adap = container_of(work, struct adapter, tid_release_task);
1986
1987 spin_lock_bh(&adap->tid_release_lock);
1988 while (adap->tid_release_head) {
1989 void **p = adap->tid_release_head;
1990 unsigned int chan = (uintptr_t)p & 3;
1991 p = (void *)p - chan;
1992
1993 adap->tid_release_head = *p;
1994 *p = NULL;
1995 spin_unlock_bh(&adap->tid_release_lock);
1996
1997 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
1998 GFP_KERNEL)))
1999 schedule_timeout_uninterruptible(1);
2000
2001 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
2002 t4_ofld_send(adap, skb);
2003 spin_lock_bh(&adap->tid_release_lock);
2004 }
2005 adap->tid_release_task_busy = false;
2006 spin_unlock_bh(&adap->tid_release_lock);
2007}
2008
2009/*
2010 * Release a TID and inform HW. If we are unable to allocate the release
2011 * message we defer to a work queue.
2012 */
2013void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
2014{
2015 void *old;
2016 struct sk_buff *skb;
2017 struct adapter *adap = container_of(t, struct adapter, tids);
2018
2019 old = t->tid_tab[tid];
2020 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
2021 if (likely(skb)) {
2022 t->tid_tab[tid] = NULL;
2023 mk_tid_release(skb, chan, tid);
2024 t4_ofld_send(adap, skb);
2025 } else
2026 cxgb4_queue_tid_release(t, chan, tid);
2027 if (old)
2028 atomic_dec(&t->tids_in_use);
2029}
2030EXPORT_SYMBOL(cxgb4_remove_tid);
2031
2032/*
2033 * Allocate and initialize the TID tables. Returns 0 on success.
2034 */
2035static int tid_init(struct tid_info *t)
2036{
2037 size_t size;
2038 unsigned int natids = t->natids;
2039
2040 size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
2041 t->nstids * sizeof(*t->stid_tab) +
2042 BITS_TO_LONGS(t->nstids) * sizeof(long);
2043 t->tid_tab = t4_alloc_mem(size);
2044 if (!t->tid_tab)
2045 return -ENOMEM;
2046
2047 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
2048 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
2049 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
2050 spin_lock_init(&t->stid_lock);
2051 spin_lock_init(&t->atid_lock);
2052
2053 t->stids_in_use = 0;
2054 t->afree = NULL;
2055 t->atids_in_use = 0;
2056 atomic_set(&t->tids_in_use, 0);
2057
2058 /* Set up the free list for atid_tab and clear the stid bitmap. */
2059 if (natids) {
2060 while (--natids)
2061 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
2062 t->afree = t->atid_tab;
2063 }
2064 bitmap_zero(t->stid_bmap, t->nstids);
2065 return 0;
2066}
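/*
 * Note: tid_tab, atid_tab, stid_tab and the stid bitmap are carved out of
 * a single t4_alloc_mem() block above, so freeing adap->tids.tid_tab (as
 * init_one() and remove_one() do) releases all of them at once.
 */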
2067
2068/**
2069 * cxgb4_create_server - create an IP server
2070 * @dev: the device
2071 * @stid: the server TID
2072 * @sip: local IP address to bind server to
2073 * @sport: the server's TCP port
2074 * @queue: queue to direct messages from this server to
2075 *
2076 * Create an IP server for the given port and address.
2077 * Returns <0 on error and one of the %NET_XMIT_* values on success.
2078 */
2079int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
2080 __be32 sip, __be16 sport, unsigned int queue)
2081{
2082 unsigned int chan;
2083 struct sk_buff *skb;
2084 struct adapter *adap;
2085 struct cpl_pass_open_req *req;
2086
2087 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
2088 if (!skb)
2089 return -ENOMEM;
2090
2091 adap = netdev2adap(dev);
2092 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
2093 INIT_TP_WR(req, 0);
2094 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
2095 req->local_port = sport;
2096 req->peer_port = htons(0);
2097 req->local_ip = sip;
2098 req->peer_ip = htonl(0);
2099 chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
2100 req->opt0 = cpu_to_be64(TX_CHAN(chan));
2101 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
2102 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
2103 return t4_mgmt_tx(adap, skb);
2104}
2105EXPORT_SYMBOL(cxgb4_create_server);
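/*
 * Usage sketch (hedged; lld, ctx, sip, sport and rxq_id are hypothetical
 * caller-side names, with sip/sport already in network byte order as the
 * CPL fields expect), showing how a ULD might pair stid allocation with
 * server creation:
 *
 *	int stid = cxgb4_alloc_stid(lld->tids, PF_INET, ctx);
 *	if (stid >= 0)
 *		err = cxgb4_create_server(lld->ports[0], stid, sip, sport,
 *					  rxq_id);
 */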
2106
2107/**
2108 * cxgb4_create_server6 - create an IPv6 server
2109 * @dev: the device
2110 * @stid: the server TID
2111 * @sip: local IPv6 address to bind server to
2112 * @sport: the server's TCP port
2113 * @queue: queue to direct messages from this server to
2114 *
2115 * Create an IPv6 server for the given port and address.
2116 * Returns <0 on error and one of the %NET_XMIT_* values on success.
2117 */
2118int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
2119 const struct in6_addr *sip, __be16 sport,
2120 unsigned int queue)
2121{
2122 unsigned int chan;
2123 struct sk_buff *skb;
2124 struct adapter *adap;
2125 struct cpl_pass_open_req6 *req;
2126
2127 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
2128 if (!skb)
2129 return -ENOMEM;
2130
2131 adap = netdev2adap(dev);
2132 req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
2133 INIT_TP_WR(req, 0);
2134 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
2135 req->local_port = sport;
2136 req->peer_port = htons(0);
2137 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
2138 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
2139 req->peer_ip_hi = cpu_to_be64(0);
2140 req->peer_ip_lo = cpu_to_be64(0);
2141 chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
2142 req->opt0 = cpu_to_be64(TX_CHAN(chan));
2143 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
2144 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
2145 return t4_mgmt_tx(adap, skb);
2146}
2147EXPORT_SYMBOL(cxgb4_create_server6);
2148
2149/**
2150 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
2151 * @mtus: the HW MTU table
2152 * @mtu: the target MTU
2153 * @idx: index of selected entry in the MTU table
2154 *
2155 * Returns the index and the value in the HW MTU table that is closest to
2156 * but does not exceed @mtu, unless @mtu is smaller than any value in the
2157 * table, in which case that smallest available value is selected.
2158 */
2159unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
2160 unsigned int *idx)
2161{
2162 unsigned int i = 0;
2163
2164 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
2165 ++i;
2166 if (idx)
2167 *idx = i;
2168 return mtus[i];
2169}
2170EXPORT_SYMBOL(cxgb4_best_mtu);
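/*
 * Note: for example, if the loaded table contained ... 1280, 1488, 1500 ...
 * then cxgb4_best_mtu(mtus, 1400, &idx) would return 1280 and set *idx to
 * that entry's position, while a target smaller than the first entry
 * selects mtus[0].
 */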
2171
2172/**
2173 * cxgb4_port_chan - get the HW channel of a port
2174 * @dev: the net device for the port
2175 *
2176 * Return the HW Tx channel of the given port.
2177 */
2178unsigned int cxgb4_port_chan(const struct net_device *dev)
2179{
2180 return netdev2pinfo(dev)->tx_chan;
2181}
2182EXPORT_SYMBOL(cxgb4_port_chan);
2183
2184/**
2185 * cxgb4_port_viid - get the VI id of a port
2186 * @dev: the net device for the port
2187 *
2188 * Return the VI id of the given port.
2189 */
2190unsigned int cxgb4_port_viid(const struct net_device *dev)
2191{
2192 return netdev2pinfo(dev)->viid;
2193}
2194EXPORT_SYMBOL(cxgb4_port_viid);
2195
2196/**
2197 * cxgb4_port_idx - get the index of a port
2198 * @dev: the net device for the port
2199 *
2200 * Return the index of the given port.
2201 */
2202unsigned int cxgb4_port_idx(const struct net_device *dev)
2203{
2204 return netdev2pinfo(dev)->port_id;
2205}
2206EXPORT_SYMBOL(cxgb4_port_idx);
2207
2208/**
2209 * cxgb4_netdev_by_hwid - return the net device of a HW port
2210 * @pdev: identifies the adapter
2211 * @id: the HW port id
2212 *
2213 * Return the net device associated with the interface with the given HW
2214 * id.
2215 */
2216struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id)
2217{
2218 const struct adapter *adap = pci_get_drvdata(pdev);
2219
2220 if (!adap || id >= NCHAN)
2221 return NULL;
2222 id = adap->chan_map[id];
2223 return id < MAX_NPORTS ? adap->port[id] : NULL;
2224}
2225EXPORT_SYMBOL(cxgb4_netdev_by_hwid);
2226
2227void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
2228 struct tp_tcp_stats *v6)
2229{
2230 struct adapter *adap = pci_get_drvdata(pdev);
2231
2232 spin_lock(&adap->stats_lock);
2233 t4_tp_get_tcp_stats(adap, v4, v6);
2234 spin_unlock(&adap->stats_lock);
2235}
2236EXPORT_SYMBOL(cxgb4_get_tcp_stats);
2237
2238void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
2239 const unsigned int *pgsz_order)
2240{
2241 struct adapter *adap = netdev2adap(dev);
2242
2243 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
2244 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
2245 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
2246 HPZ3(pgsz_order[3]));
2247}
2248EXPORT_SYMBOL(cxgb4_iscsi_init);
2249
2250static struct pci_driver cxgb4_driver;
2251
2252static void check_neigh_update(struct neighbour *neigh)
2253{
2254 const struct device *parent;
2255 const struct net_device *netdev = neigh->dev;
2256
2257 if (netdev->priv_flags & IFF_802_1Q_VLAN)
2258 netdev = vlan_dev_real_dev(netdev);
2259 parent = netdev->dev.parent;
2260 if (parent && parent->driver == &cxgb4_driver.driver)
2261 t4_l2t_update(dev_get_drvdata(parent), neigh);
2262}
2263
2264static int netevent_cb(struct notifier_block *nb, unsigned long event,
2265 void *data)
2266{
2267 switch (event) {
2268 case NETEVENT_NEIGH_UPDATE:
2269 check_neigh_update(data);
2270 break;
2271 case NETEVENT_PMTU_UPDATE:
2272 case NETEVENT_REDIRECT:
2273 default:
2274 break;
2275 }
2276 return 0;
2277}
2278
2279static bool netevent_registered;
2280static struct notifier_block cxgb4_netevent_nb = {
2281 .notifier_call = netevent_cb
2282};
2283
2284static void uld_attach(struct adapter *adap, unsigned int uld)
2285{
2286 void *handle;
2287 struct cxgb4_lld_info lli;
2288
2289 lli.pdev = adap->pdev;
2290 lli.l2t = adap->l2t;
2291 lli.tids = &adap->tids;
2292 lli.ports = adap->port;
2293 lli.vr = &adap->vres;
2294 lli.mtus = adap->params.mtus;
2295 if (uld == CXGB4_ULD_RDMA) {
2296 lli.rxq_ids = adap->sge.rdma_rxq;
2297 lli.nrxq = adap->sge.rdmaqs;
2298 } else if (uld == CXGB4_ULD_ISCSI) {
2299 lli.rxq_ids = adap->sge.ofld_rxq;
2300 lli.nrxq = adap->sge.ofldqsets;
2301 }
2302 lli.ntxq = adap->sge.ofldqsets;
2303 lli.nchan = adap->params.nports;
2304 lli.nports = adap->params.nports;
2305 lli.wr_cred = adap->params.ofldq_wr_cred;
2306 lli.adapter_type = adap->params.rev;
2307 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
2308 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
2309 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF));
2310 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
2311 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF));
2312 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
2313 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
2314 lli.fw_vers = adap->params.fw_vers;
2315
2316 handle = ulds[uld].add(&lli);
2317 if (IS_ERR(handle)) {
2318 dev_warn(adap->pdev_dev,
2319 "could not attach to the %s driver, error %ld\n",
2320 uld_str[uld], PTR_ERR(handle));
2321 return;
2322 }
2323
2324 adap->uld_handle[uld] = handle;
2325
2326 if (!netevent_registered) {
2327 register_netevent_notifier(&cxgb4_netevent_nb);
2328 netevent_registered = true;
2329 }
2330
2331 if (adap->flags & FULL_INIT_DONE)
2332 ulds[uld].state_change(handle, CXGB4_STATE_UP);
2333}
2334
2335static void attach_ulds(struct adapter *adap)
2336{
2337 unsigned int i;
2338
2339 mutex_lock(&uld_mutex);
2340 list_add_tail(&adap->list_node, &adapter_list);
2341 for (i = 0; i < CXGB4_ULD_MAX; i++)
2342 if (ulds[i].add)
2343 uld_attach(adap, i);
2344 mutex_unlock(&uld_mutex);
2345}
2346
2347static void detach_ulds(struct adapter *adap)
2348{
2349 unsigned int i;
2350
2351 mutex_lock(&uld_mutex);
2352 list_del(&adap->list_node);
2353 for (i = 0; i < CXGB4_ULD_MAX; i++)
2354 if (adap->uld_handle[i]) {
2355 ulds[i].state_change(adap->uld_handle[i],
2356 CXGB4_STATE_DETACH);
2357 adap->uld_handle[i] = NULL;
2358 }
2359 if (netevent_registered && list_empty(&adapter_list)) {
2360 unregister_netevent_notifier(&cxgb4_netevent_nb);
2361 netevent_registered = false;
2362 }
2363 mutex_unlock(&uld_mutex);
2364}
2365
2366static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
2367{
2368 unsigned int i;
2369
2370 mutex_lock(&uld_mutex);
2371 for (i = 0; i < CXGB4_ULD_MAX; i++)
2372 if (adap->uld_handle[i])
2373 ulds[i].state_change(adap->uld_handle[i], new_state);
2374 mutex_unlock(&uld_mutex);
2375}
2376
2377/**
2378 * cxgb4_register_uld - register an upper-layer driver
2379 * @type: the ULD type
2380 * @p: the ULD methods
2381 *
2382 * Registers an upper-layer driver with this driver and notifies the ULD
2383 * about any presently available devices that support its type. Returns
2384 * %-EBUSY if a ULD of the same type is already registered.
2385 */
2386int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
2387{
2388 int ret = 0;
2389 struct adapter *adap;
2390
2391 if (type >= CXGB4_ULD_MAX)
2392 return -EINVAL;
2393 mutex_lock(&uld_mutex);
2394 if (ulds[type].add) {
2395 ret = -EBUSY;
2396 goto out;
2397 }
2398 ulds[type] = *p;
2399 list_for_each_entry(adap, &adapter_list, list_node)
2400 uld_attach(adap, type);
2401out: mutex_unlock(&uld_mutex);
2402 return ret;
2403}
2404EXPORT_SYMBOL(cxgb4_register_uld);
2405
2406/**
2407 * cxgb4_unregister_uld - unregister an upper-layer driver
2408 * @type: the ULD type
2409 *
2410 * Unregisters an existing upper-layer driver.
2411 */
2412int cxgb4_unregister_uld(enum cxgb4_uld type)
2413{
2414 struct adapter *adap;
2415
2416 if (type >= CXGB4_ULD_MAX)
2417 return -EINVAL;
2418 mutex_lock(&uld_mutex);
2419 list_for_each_entry(adap, &adapter_list, list_node)
2420 adap->uld_handle[type] = NULL;
2421 ulds[type].add = NULL;
2422 mutex_unlock(&uld_mutex);
2423 return 0;
2424}
2425EXPORT_SYMBOL(cxgb4_unregister_uld);
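/*
 * Registration sketch (hedged; assumes the ULD supplies the usual
 * cxgb4_uld_info callbacks, and my_add/my_rx/my_state_change are
 * hypothetical):
 *
 *	static const struct cxgb4_uld_info my_uld_info = {
 *		.name		= "my_uld",
 *		.add		= my_add,
 *		.rx_handler	= my_rx,
 *		.state_change	= my_state_change,
 *	};
 *
 *	err = cxgb4_register_uld(CXGB4_ULD_ISCSI, &my_uld_info);
 *	...
 *	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
 */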
2426
2427/**
2428 * cxgb_up - enable the adapter
2429 * @adap: adapter being enabled
2430 *
2431 * Called when the first port is enabled, this function performs the
2432 * actions necessary to make an adapter operational, such as completing
2433 * the initialization of HW modules and enabling interrupts.
2434 *
2435 * Must be called with the rtnl lock held.
2436 */
2437static int cxgb_up(struct adapter *adap)
2438{
2439 int err;
2440
2441 err = setup_sge_queues(adap);
2442 if (err)
2443 goto out;
2444 err = setup_rss(adap);
2445 if (err)
2446 goto freeq;
2447
2448 if (adap->flags & USING_MSIX) {
2449 name_msix_vecs(adap);
2450 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
2451 adap->msix_info[0].desc, adap);
2452 if (err)
2453 goto irq_err;
2454
2455 err = request_msix_queue_irqs(adap);
2456 if (err) {
2457 free_irq(adap->msix_info[0].vec, adap);
2458 goto irq_err;
2459 }
2460 } else {
2461 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
2462 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
2463 adap->name, adap);
2464 if (err)
2465 goto irq_err;
2466 }
2467 enable_rx(adap);
2468 t4_sge_start(adap);
2469 t4_intr_enable(adap);
2470 adap->flags |= FULL_INIT_DONE;
2471 notify_ulds(adap, CXGB4_STATE_UP);
2472 out:
2473 return err;
2474 irq_err:
2475 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
2476 freeq:
2477 t4_free_sge_resources(adap);
2478 goto out;
2479}
2480
2481static void cxgb_down(struct adapter *adapter)
2482{
2483 t4_intr_disable(adapter);
2484 cancel_work_sync(&adapter->tid_release_task);
2485 adapter->tid_release_task_busy = false;
2486 adapter->tid_release_head = NULL;
2487
2488 if (adapter->flags & USING_MSIX) {
2489 free_msix_queue_irqs(adapter);
2490 free_irq(adapter->msix_info[0].vec, adapter);
2491 } else
2492 free_irq(adapter->pdev->irq, adapter);
2493 quiesce_rx(adapter);
2494 t4_sge_stop(adapter);
2495 t4_free_sge_resources(adapter);
2496 adapter->flags &= ~FULL_INIT_DONE;
2497}
2498
2499/*
2500 * net_device operations
2501 */
2502static int cxgb_open(struct net_device *dev)
2503{
2504 int err;
2505 struct port_info *pi = netdev_priv(dev);
2506 struct adapter *adapter = pi->adapter;
2507
2508 if (!(adapter->flags & FULL_INIT_DONE)) {
2509 err = cxgb_up(adapter);
2510 if (err < 0)
2511 return err;
2512 }
2513
2514 dev->real_num_tx_queues = pi->nqsets;
2515 err = link_start(dev);
2516 if (!err)
2517 netif_tx_start_all_queues(dev);
2518 return err;
2519}
2520
2521static int cxgb_close(struct net_device *dev)
2522{
2523 struct port_info *pi = netdev_priv(dev);
2524 struct adapter *adapter = pi->adapter;
2525
2526 netif_tx_stop_all_queues(dev);
2527 netif_carrier_off(dev);
2528 return t4_enable_vi(adapter, 0, pi->viid, false, false);
2529}
2530
2531static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev)
2532{
2533 struct port_stats stats;
2534 struct port_info *p = netdev_priv(dev);
2535 struct adapter *adapter = p->adapter;
2536 struct rtnl_link_stats64 *ns = &dev->stats64;
2537
2538 spin_lock(&adapter->stats_lock);
2539 t4_get_port_stats(adapter, p->tx_chan, &stats);
2540 spin_unlock(&adapter->stats_lock);
2541
2542 ns->tx_bytes = stats.tx_octets;
2543 ns->tx_packets = stats.tx_frames;
2544 ns->rx_bytes = stats.rx_octets;
2545 ns->rx_packets = stats.rx_frames;
2546 ns->multicast = stats.rx_mcast_frames;
2547
2548 /* detailed rx_errors */
2549 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
2550 stats.rx_runt;
2551 ns->rx_over_errors = 0;
2552 ns->rx_crc_errors = stats.rx_fcs_err;
2553 ns->rx_frame_errors = stats.rx_symbol_err;
2554 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
2555 stats.rx_ovflow2 + stats.rx_ovflow3 +
2556 stats.rx_trunc0 + stats.rx_trunc1 +
2557 stats.rx_trunc2 + stats.rx_trunc3;
2558 ns->rx_missed_errors = 0;
2559
2560 /* detailed tx_errors */
2561 ns->tx_aborted_errors = 0;
2562 ns->tx_carrier_errors = 0;
2563 ns->tx_fifo_errors = 0;
2564 ns->tx_heartbeat_errors = 0;
2565 ns->tx_window_errors = 0;
2566
2567 ns->tx_errors = stats.tx_error_frames;
2568 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
2569 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
2570 return ns;
2571}
2572
2573static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2574{
2575 int ret = 0, prtad, devad;
2576 struct port_info *pi = netdev_priv(dev);
2577 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
2578
2579 switch (cmd) {
2580 case SIOCGMIIPHY:
2581 if (pi->mdio_addr < 0)
2582 return -EOPNOTSUPP;
2583 data->phy_id = pi->mdio_addr;
2584 break;
2585 case SIOCGMIIREG:
2586 case SIOCSMIIREG:
2587 if (mdio_phy_id_is_c45(data->phy_id)) {
2588 prtad = mdio_phy_id_prtad(data->phy_id);
2589 devad = mdio_phy_id_devad(data->phy_id);
2590 } else if (data->phy_id < 32) {
2591 prtad = data->phy_id;
2592 devad = 0;
2593 data->reg_num &= 0x1f;
2594 } else
2595 return -EINVAL;
2596
2597 if (cmd == SIOCGMIIREG)
2598 ret = t4_mdio_rd(pi->adapter, 0, prtad, devad,
2599 data->reg_num, &data->val_out);
2600 else
2601 ret = t4_mdio_wr(pi->adapter, 0, prtad, devad,
2602 data->reg_num, data->val_in);
2603 break;
2604 default:
2605 return -EOPNOTSUPP;
2606 }
2607 return ret;
2608}
2609
2610static void cxgb_set_rxmode(struct net_device *dev)
2611{
2612 /* unfortunately we can't return errors to the stack */
2613 set_rxmode(dev, -1, false);
2614}
2615
2616static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2617{
2618 int ret;
2619 struct port_info *pi = netdev_priv(dev);
2620
2621 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
2622 return -EINVAL;
2623 ret = t4_set_rxmode(pi->adapter, 0, pi->viid, new_mtu, -1, -1, -1, -1,
2624 true);
2625 if (!ret)
2626 dev->mtu = new_mtu;
2627 return ret;
2628}
2629
2630static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2631{
2632 int ret;
2633 struct sockaddr *addr = p;
2634 struct port_info *pi = netdev_priv(dev);
2635
2636 if (!is_valid_ether_addr(addr->sa_data))
2637 return -EINVAL;
2638
2639 ret = t4_change_mac(pi->adapter, 0, pi->viid, pi->xact_addr_filt,
2640 addr->sa_data, true, true);
2641 if (ret < 0)
2642 return ret;
2643
2644 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2645 pi->xact_addr_filt = ret;
2646 return 0;
2647}
2648
2649static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2650{
2651 struct port_info *pi = netdev_priv(dev);
2652
2653 pi->vlan_grp = grp;
2654 t4_set_rxmode(pi->adapter, 0, pi->viid, -1, -1, -1, -1, grp != NULL,
2655 true);
2656}
2657
2658#ifdef CONFIG_NET_POLL_CONTROLLER
2659static void cxgb_netpoll(struct net_device *dev)
2660{
2661 struct port_info *pi = netdev_priv(dev);
2662 struct adapter *adap = pi->adapter;
2663
2664 if (adap->flags & USING_MSIX) {
2665 int i;
2666 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
2667
2668 for (i = pi->nqsets; i; i--, rx++)
2669 t4_sge_intr_msix(0, &rx->rspq);
2670 } else
2671 t4_intr_handler(adap)(0, adap);
2672}
2673#endif
2674
2675static const struct net_device_ops cxgb4_netdev_ops = {
2676 .ndo_open = cxgb_open,
2677 .ndo_stop = cxgb_close,
2678 .ndo_start_xmit = t4_eth_xmit,
2679 .ndo_get_stats64 = cxgb_get_stats,
2680 .ndo_set_rx_mode = cxgb_set_rxmode,
2681 .ndo_set_mac_address = cxgb_set_mac_addr,
2682 .ndo_validate_addr = eth_validate_addr,
2683 .ndo_do_ioctl = cxgb_ioctl,
2684 .ndo_change_mtu = cxgb_change_mtu,
2685 .ndo_vlan_rx_register = vlan_rx_register,
2686#ifdef CONFIG_NET_POLL_CONTROLLER
2687 .ndo_poll_controller = cxgb_netpoll,
2688#endif
2689};
2690
2691void t4_fatal_err(struct adapter *adap)
2692{
2693 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
2694 t4_intr_disable(adap);
2695 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
2696}
2697
2698static void setup_memwin(struct adapter *adap)
2699{
2700 u32 bar0;
2701
2702 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
2703 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
2704 (bar0 + MEMWIN0_BASE) | BIR(0) |
2705 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
2706 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
2707 (bar0 + MEMWIN1_BASE) | BIR(0) |
2708 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
2709 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
2710 (bar0 + MEMWIN2_BASE) | BIR(0) |
2711 WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
2712}
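/*
 * Note: each window register above takes a BAR0-relative base, the BAR
 * index, and the aperture expressed as ilog2(size) - 10, e.g. WINDOW(6)
 * selects the 64KB MEMWIN0_APERTURE.
 */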
2713
2714static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
2715{
2716 u32 v;
2717 int ret;
2718
2719 /* get device capabilities */
2720 memset(c, 0, sizeof(*c));
2721 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2722 FW_CMD_REQUEST | FW_CMD_READ);
2723 c->retval_len16 = htonl(FW_LEN16(*c));
2724 ret = t4_wr_mbox(adap, 0, c, sizeof(*c), c);
2725 if (ret < 0)
2726 return ret;
2727
2728 /* select capabilities we'll be using */
2729 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
2730 if (!vf_acls)
2731 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
2732 else
2733 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
2734 } else if (vf_acls) {
2735 dev_err(adap->pdev_dev, "virtualization ACLs not supported\n");
2736 return -EINVAL;
2737 }
2738 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2739 FW_CMD_REQUEST | FW_CMD_WRITE);
2740 ret = t4_wr_mbox(adap, 0, c, sizeof(*c), NULL);
2741 if (ret < 0)
2742 return ret;
2743
2744 ret = t4_config_glbl_rss(adap, 0,
2745 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
2746 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
2747 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
2748 if (ret < 0)
2749 return ret;
2750
2751 ret = t4_cfg_pfvf(adap, 0, 0, 0, 64, 64, 64, 0, 0, 4, 0xf, 0xf, 16,
2752 FW_CMD_CAP_PF, FW_CMD_CAP_PF);
2753 if (ret < 0)
2754 return ret;
2755
2756 t4_sge_init(adap);
2757
2758 /* get basic stuff going */
2759 ret = t4_early_init(adap, 0);
2760 if (ret < 0)
2761 return ret;
2762
2763 /* tweak some settings */
2764 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
2765 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
2766 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
2767 v = t4_read_reg(adap, TP_PIO_DATA);
2768 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
2769 setup_memwin(adap);
2770 return 0;
2771}
2772
2773/*
2774 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
2775 */
2776#define MAX_ATIDS 8192U
2777
2778/*
2779 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
2780 */
2781static int adap_init0(struct adapter *adap)
2782{
2783 int ret;
2784 u32 v, port_vec;
2785 enum dev_state state;
2786 u32 params[7], val[7];
2787 struct fw_caps_config_cmd c;
2788
2789 ret = t4_check_fw_version(adap);
2790 if (ret == -EINVAL || ret > 0) {
2791 if (upgrade_fw(adap) >= 0) /* recache FW version */
2792 ret = t4_check_fw_version(adap);
2793 }
2794 if (ret < 0)
2795 return ret;
2796
2797 /* contact FW, request master */
2798 ret = t4_fw_hello(adap, 0, 0, MASTER_MUST, &state);
2799 if (ret < 0) {
2800 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
2801 ret);
2802 return ret;
2803 }
2804
2805 /* reset device */
2806 ret = t4_fw_reset(adap, 0, PIORSTMODE | PIORST);
2807 if (ret < 0)
2808 goto bye;
2809
2810 for (v = 0; v < SGE_NTIMERS - 1; v++)
2811 adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL);
2812 adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
2813 adap->sge.counter_val[0] = 1;
2814 for (v = 1; v < SGE_NCOUNTERS; v++)
2815 adap->sge.counter_val[v] = min(intr_cnt[v - 1],
2816 THRESHOLD_3_MASK);
2817 ret = adap_init1(adap, &c);
2818 if (ret < 0)
2819 goto bye;
2820
2821#define FW_PARAM_DEV(param) \
2822 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
2823 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
2824
2825#define FW_PARAM_PFVF(param) \
2826 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
2827 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
2828
2829 params[0] = FW_PARAM_DEV(PORTVEC);
2830 params[1] = FW_PARAM_PFVF(L2T_START);
2831 params[2] = FW_PARAM_PFVF(L2T_END);
2832 params[3] = FW_PARAM_PFVF(FILTER_START);
2833 params[4] = FW_PARAM_PFVF(FILTER_END);
2834 ret = t4_query_params(adap, 0, 0, 0, 5, params, val);
2835 if (ret < 0)
2836 goto bye;
2837 port_vec = val[0];
2838 adap->tids.ftid_base = val[3];
2839 adap->tids.nftids = val[4] - val[3] + 1;
2840
2841 if (c.ofldcaps) {
2842 /* query offload-related parameters */
2843 params[0] = FW_PARAM_DEV(NTID);
2844 params[1] = FW_PARAM_PFVF(SERVER_START);
2845 params[2] = FW_PARAM_PFVF(SERVER_END);
2846 params[3] = FW_PARAM_PFVF(TDDP_START);
2847 params[4] = FW_PARAM_PFVF(TDDP_END);
2848 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2849 ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
2850 if (ret < 0)
2851 goto bye;
2852 adap->tids.ntids = val[0];
2853 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
2854 adap->tids.stid_base = val[1];
2855 adap->tids.nstids = val[2] - val[1] + 1;
2856 adap->vres.ddp.start = val[3];
2857 adap->vres.ddp.size = val[4] - val[3] + 1;
2858 adap->params.ofldq_wr_cred = val[5];
2859 adap->params.offload = 1;
2860 }
2861 if (c.rdmacaps) {
2862 params[0] = FW_PARAM_PFVF(STAG_START);
2863 params[1] = FW_PARAM_PFVF(STAG_END);
2864 params[2] = FW_PARAM_PFVF(RQ_START);
2865 params[3] = FW_PARAM_PFVF(RQ_END);
2866 params[4] = FW_PARAM_PFVF(PBL_START);
2867 params[5] = FW_PARAM_PFVF(PBL_END);
2868 ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
2869 if (ret < 0)
2870 goto bye;
2871 adap->vres.stag.start = val[0];
2872 adap->vres.stag.size = val[1] - val[0] + 1;
2873 adap->vres.rq.start = val[2];
2874 adap->vres.rq.size = val[3] - val[2] + 1;
2875 adap->vres.pbl.start = val[4];
2876 adap->vres.pbl.size = val[5] - val[4] + 1;
2877 }
2878 if (c.iscsicaps) {
2879 params[0] = FW_PARAM_PFVF(ISCSI_START);
2880 params[1] = FW_PARAM_PFVF(ISCSI_END);
2881 ret = t4_query_params(adap, 0, 0, 0, 2, params, val);
2882 if (ret < 0)
2883 goto bye;
2884 adap->vres.iscsi.start = val[0];
2885 adap->vres.iscsi.size = val[1] - val[0] + 1;
2886 }
2887#undef FW_PARAM_PFVF
2888#undef FW_PARAM_DEV
2889
2890 adap->params.nports = hweight32(port_vec);
2891 adap->params.portvec = port_vec;
2892 adap->flags |= FW_OK;
2893
2894 /* These are finalized by FW initialization, load their values now */
2895 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
2896 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
2897 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
2898 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
2899 adap->params.b_wnd);
2900 return 0;
2901
2902 /*
2903 * If a command timed out or failed with EIO, the FW is not operating
2904 * within its spec or something catastrophic happened to the HW/FW;
2905 * stop issuing commands.
2906 */
2907bye: if (ret != -ETIMEDOUT && ret != -EIO)
2908 t4_fw_bye(adap, 0);
2909 return ret;
2910}
2911
2912/* EEH callbacks */
2913
2914static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
2915 pci_channel_state_t state)
2916{
2917 int i;
2918 struct adapter *adap = pci_get_drvdata(pdev);
2919
2920 if (!adap)
2921 goto out;
2922
2923 rtnl_lock();
2924 adap->flags &= ~FW_OK;
2925 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
2926 for_each_port(adap, i) {
2927 struct net_device *dev = adap->port[i];
2928
2929 netif_device_detach(dev);
2930 netif_carrier_off(dev);
2931 }
2932 if (adap->flags & FULL_INIT_DONE)
2933 cxgb_down(adap);
2934 rtnl_unlock();
2935 pci_disable_device(pdev);
2936out: return state == pci_channel_io_perm_failure ?
2937 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
2938}
2939
2940static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
2941{
2942 int i, ret;
2943 struct fw_caps_config_cmd c;
2944 struct adapter *adap = pci_get_drvdata(pdev);
2945
2946 if (!adap) {
2947 pci_restore_state(pdev);
2948 pci_save_state(pdev);
2949 return PCI_ERS_RESULT_RECOVERED;
2950 }
2951
2952 if (pci_enable_device(pdev)) {
2953 dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
2954 return PCI_ERS_RESULT_DISCONNECT;
2955 }
2956
2957 pci_set_master(pdev);
2958 pci_restore_state(pdev);
2959 pci_save_state(pdev);
2960 pci_cleanup_aer_uncorrect_error_status(pdev);
2961
2962 if (t4_wait_dev_ready(adap) < 0)
2963 return PCI_ERS_RESULT_DISCONNECT;
2964 if (t4_fw_hello(adap, 0, 0, MASTER_MUST, NULL))
2965 return PCI_ERS_RESULT_DISCONNECT;
2966 adap->flags |= FW_OK;
2967 if (adap_init1(adap, &c))
2968 return PCI_ERS_RESULT_DISCONNECT;
2969
2970 for_each_port(adap, i) {
2971 struct port_info *p = adap2pinfo(adap, i);
2972
2973 ret = t4_alloc_vi(adap, 0, p->tx_chan, 0, 0, 1, NULL, NULL);
2974 if (ret < 0)
2975 return PCI_ERS_RESULT_DISCONNECT;
2976 p->viid = ret;
2977 p->xact_addr_filt = -1;
2978 }
2979
2980 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
2981 adap->params.b_wnd);
2982 if (cxgb_up(adap))
2983 return PCI_ERS_RESULT_DISCONNECT;
2984 return PCI_ERS_RESULT_RECOVERED;
2985}
2986
2987static void eeh_resume(struct pci_dev *pdev)
2988{
2989 int i;
2990 struct adapter *adap = pci_get_drvdata(pdev);
2991
2992 if (!adap)
2993 return;
2994
2995 rtnl_lock();
2996 for_each_port(adap, i) {
2997 struct net_device *dev = adap->port[i];
2998
2999 if (netif_running(dev)) {
3000 link_start(dev);
3001 cxgb_set_rxmode(dev);
3002 }
3003 netif_device_attach(dev);
3004 }
3005 rtnl_unlock();
3006}
3007
3008static struct pci_error_handlers cxgb4_eeh = {
3009 .error_detected = eeh_err_detected,
3010 .slot_reset = eeh_slot_reset,
3011 .resume = eeh_resume,
3012};
3013
3014static inline bool is_10g_port(const struct link_config *lc)
3015{
3016 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
3017}
3018
3019static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
3020 unsigned int size, unsigned int iqe_size)
3021{
3022 q->intr_params = QINTR_TIMER_IDX(timer_idx) |
3023 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
3024 q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
3025 q->iqe_len = iqe_size;
3026 q->size = size;
3027}
3028
3029/*
3030 * Perform default configuration of DMA queues depending on the number and type
3031 * of ports we found and the number of available CPUs. Most settings can be
3032 * modified by the admin prior to actual use.
3033 */
3034static void __devinit cfg_queues(struct adapter *adap)
3035{
3036 struct sge *s = &adap->sge;
3037 int i, q10g = 0, n10g = 0, qidx = 0;
3038
3039 for_each_port(adap, i)
3040 n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
3041
3042 /*
3043 * We default to 1 queue set per non-10G port and up to one queue set
3044 * per CPU core for each 10G port.
3045 */
3046 if (n10g)
3047 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
3048 if (q10g > num_online_cpus())
3049 q10g = num_online_cpus();
3050
3051 for_each_port(adap, i) {
3052 struct port_info *pi = adap2pinfo(adap, i);
3053
3054 pi->first_qset = qidx;
3055 pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
3056 qidx += pi->nqsets;
3057 }
3058
3059 s->ethqsets = qidx;
3060 s->max_ethqsets = qidx; /* MSI-X may lower it later */
3061
3062 if (is_offload(adap)) {
3063 /*
3064 * For offload we use 1 queue/channel if all ports are up to 1G,
3065 * otherwise we divide all available queues amongst the channels
3066 * capped by the number of available cores.
3067 */
3068 if (n10g) {
3069 i = min_t(int, ARRAY_SIZE(s->ofldrxq),
3070 num_online_cpus());
3071 s->ofldqsets = roundup(i, adap->params.nports);
3072 } else
3073 s->ofldqsets = adap->params.nports;
3074 /* For RDMA one Rx queue per channel suffices */
3075 s->rdmaqs = adap->params.nports;
3076 }
3077
3078 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
3079 struct sge_eth_rxq *r = &s->ethrxq[i];
3080
3081 init_rspq(&r->rspq, 0, 0, 1024, 64);
3082 r->fl.size = 72;
3083 }
3084
3085 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
3086 s->ethtxq[i].q.size = 1024;
3087
3088 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
3089 s->ctrlq[i].q.size = 512;
3090
3091 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
3092 s->ofldtxq[i].q.size = 1024;
3093
3094 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
3095 struct sge_ofld_rxq *r = &s->ofldrxq[i];
3096
3097 init_rspq(&r->rspq, 0, 0, 1024, 64);
3098 r->rspq.uld = CXGB4_ULD_ISCSI;
3099 r->fl.size = 72;
3100 }
3101
3102 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
3103 struct sge_ofld_rxq *r = &s->rdmarxq[i];
3104
3105 init_rspq(&r->rspq, 0, 0, 511, 64);
3106 r->rspq.uld = CXGB4_ULD_RDMA;
3107 r->fl.size = 72;
3108 }
3109
3110 init_rspq(&s->fw_evtq, 6, 0, 512, 64);
3111 init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
3112}
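/*
 * Note: as a worked example, a 2-port 10G adapter with 8 online CPUs gets
 * q10g = min((MAX_ETH_QSETS - 0) / 2, 8) = 8 queue sets per port (assuming
 * MAX_ETH_QSETS is at least 16), i.e. 16 Ethernet queue sets in total,
 * while each port of a 1G-only adapter gets a single queue set.
 */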
3113
3114/*
3115 * Reduce the number of Ethernet queues across all ports to at most n.
3116 * The caller ensures n is large enough to give each port at least one queue.
3117 */
3118static void __devinit reduce_ethqs(struct adapter *adap, int n)
3119{
3120 int i;
3121 struct port_info *pi;
3122
3123 while (n < adap->sge.ethqsets)
3124 for_each_port(adap, i) {
3125 pi = adap2pinfo(adap, i);
3126 if (pi->nqsets > 1) {
3127 pi->nqsets--;
3128 adap->sge.ethqsets--;
3129 if (adap->sge.ethqsets <= n)
3130 break;
3131 }
3132 }
3133
3134 n = 0;
3135 for_each_port(adap, i) {
3136 pi = adap2pinfo(adap, i);
3137 pi->first_qset = n;
3138 n += pi->nqsets;
3139 }
3140}
3141
3142/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
3143#define EXTRA_VECS 2
3144
3145static int __devinit enable_msix(struct adapter *adap)
3146{
3147 int ofld_need = 0;
3148 int i, err, want, need;
3149 struct sge *s = &adap->sge;
3150 unsigned int nchan = adap->params.nports;
3151 struct msix_entry entries[MAX_INGQ + 1];
3152
3153 for (i = 0; i < ARRAY_SIZE(entries); ++i)
3154 entries[i].entry = i;
3155
3156 want = s->max_ethqsets + EXTRA_VECS;
3157 if (is_offload(adap)) {
3158 want += s->rdmaqs + s->ofldqsets;
3159 /* need nchan for each possible ULD */
3160 ofld_need = 2 * nchan;
3161 }
3162 need = adap->params.nports + EXTRA_VECS + ofld_need;
3163
3164 while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
3165 want = err;
3166
3167 if (!err) {
3168 /*
3169 * Distribute available vectors to the various queue groups.
3170 * Every group gets its minimum requirement and NIC gets top
3171 * priority for leftovers.
3172 */
3173 i = want - EXTRA_VECS - ofld_need;
3174 if (i < s->max_ethqsets) {
3175 s->max_ethqsets = i;
3176 if (i < s->ethqsets)
3177 reduce_ethqs(adap, i);
3178 }
3179 if (is_offload(adap)) {
3180 i = want - EXTRA_VECS - s->max_ethqsets;
3181 i -= ofld_need - nchan;
3182 s->ofldqsets = (i / nchan) * nchan; /* round down */
3183 }
3184 for (i = 0; i < want; ++i)
3185 adap->msix_info[i].vec = entries[i].vector;
3186 } else if (err > 0)
3187 dev_info(adap->pdev_dev,
3188 "only %d MSI-X vectors left, not using MSI-X\n", err);
3189 return err;
3190}
3191
3192#undef EXTRA_VECS
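/*
 * Note: "want" requests a vector per Ethernet queue set plus EXTRA_VECS
 * and, on offload-capable cards, the RDMA/iSCSI queues; "need" is the bare
 * minimum the driver accepts.  When pci_enable_msix() cannot grant the
 * request it returns the number of vectors actually available, and the
 * loop retries with that smaller count for as long as it still meets
 * "need".
 */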
3193
3194static void __devinit print_port_info(struct adapter *adap)
3195{
3196 static const char *base[] = {
3197 "R", "KX4", "T", "KX", "T", "KR", "CX4"
3198 };
3199
3200 int i;
3201 char buf[80];
3202 const char *spd = "";
3203
3204 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
3205 spd = " 2.5 GT/s";
3206 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
3207 spd = " 5 GT/s";
3208
3209 for_each_port(adap, i) {
3210 struct net_device *dev = adap->port[i];
3211 const struct port_info *pi = netdev_priv(dev);
3212 char *bufp = buf;
3213
3214 if (!test_bit(i, &adap->registered_device_map))
3215 continue;
3216
3217 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
3218 bufp += sprintf(bufp, "100/");
3219 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
3220 bufp += sprintf(bufp, "1000/");
3221 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
3222 bufp += sprintf(bufp, "10G/");
3223 if (bufp != buf)
3224 --bufp;
3225 sprintf(bufp, "BASE-%s", base[pi->port_type]);
3226
3227 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
3228 adap->params.vpd.id, adap->params.rev,
3229 buf, is_offload(adap) ? "R" : "",
3230 adap->params.pci.width, spd,
3231 (adap->flags & USING_MSIX) ? " MSI-X" :
3232 (adap->flags & USING_MSI) ? " MSI" : "");
3233 if (adap->name == dev->name)
3234 netdev_info(dev, "S/N: %s, E/C: %s\n",
3235 adap->params.vpd.sn, adap->params.vpd.ec);
3236 }
3237}
3238
3239#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |\
3240 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
3241
3242static int __devinit init_one(struct pci_dev *pdev,
3243 const struct pci_device_id *ent)
3244{
3245 int func, i, err;
3246 struct port_info *pi;
3247 unsigned int highdma = 0;
3248 struct adapter *adapter = NULL;
3249
3250 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
3251
3252 err = pci_request_regions(pdev, KBUILD_MODNAME);
3253 if (err) {
3254 /* Just info, some other driver may have claimed the device. */
3255 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3256 return err;
3257 }
3258
3259 /* We control everything through PF 0 */
3260 func = PCI_FUNC(pdev->devfn);
3261 if (func > 0) {
3262 pci_save_state(pdev); /* to restore SR-IOV later */
3263 goto sriov;
3264 }
3265
3266 err = pci_enable_device(pdev);
3267 if (err) {
3268 dev_err(&pdev->dev, "cannot enable PCI device\n");
3269 goto out_release_regions;
3270 }
3271
3272 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3273 highdma = NETIF_F_HIGHDMA;
3274 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3275 if (err) {
3276 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3277 "coherent allocations\n");
3278 goto out_disable_device;
3279 }
3280 } else {
3281 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3282 if (err) {
3283 dev_err(&pdev->dev, "no usable DMA configuration\n");
3284 goto out_disable_device;
3285 }
3286 }
3287
3288 pci_enable_pcie_error_reporting(pdev);
3289 pci_set_master(pdev);
3290 pci_save_state(pdev);
3291
3292 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3293 if (!adapter) {
3294 err = -ENOMEM;
3295 goto out_disable_device;
3296 }
3297
3298 adapter->regs = pci_ioremap_bar(pdev, 0);
3299 if (!adapter->regs) {
3300 dev_err(&pdev->dev, "cannot map device registers\n");
3301 err = -ENOMEM;
3302 goto out_free_adapter;
3303 }
3304
3305 adapter->pdev = pdev;
3306 adapter->pdev_dev = &pdev->dev;
3307 adapter->name = pci_name(pdev);
3308 adapter->msg_enable = dflt_msg_enable;
3309 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
3310
3311 spin_lock_init(&adapter->stats_lock);
3312 spin_lock_init(&adapter->tid_release_lock);
3313
3314 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
3315
3316 err = t4_prep_adapter(adapter);
3317 if (err)
3318 goto out_unmap_bar;
3319 err = adap_init0(adapter);
3320 if (err)
3321 goto out_unmap_bar;
3322
3323 for_each_port(adapter, i) {
3324 struct net_device *netdev;
3325
3326 netdev = alloc_etherdev_mq(sizeof(struct port_info),
3327 MAX_ETH_QSETS);
3328 if (!netdev) {
3329 err = -ENOMEM;
3330 goto out_free_dev;
3331 }
3332
3333 SET_NETDEV_DEV(netdev, &pdev->dev);
3334
3335 adapter->port[i] = netdev;
3336 pi = netdev_priv(netdev);
3337 pi->adapter = adapter;
3338 pi->xact_addr_filt = -1;
3339 pi->rx_offload = RX_CSO;
3340 pi->port_id = i;
3341 netif_carrier_off(netdev);
3342 netif_tx_stop_all_queues(netdev);
3343 netdev->irq = pdev->irq;
3344
3345 netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
3346 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3347 netdev->features |= NETIF_F_GRO | NETIF_F_RXHASH | highdma;
3348 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3349 netdev->vlan_features = netdev->features & VLAN_FEAT;
3350
3351 netdev->netdev_ops = &cxgb4_netdev_ops;
3352 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
3353 }
3354
3355 pci_set_drvdata(pdev, adapter);
3356
3357 if (adapter->flags & FW_OK) {
3358 err = t4_port_init(adapter, 0, 0, 0);
3359 if (err)
3360 goto out_free_dev;
3361 }
3362
3363 /*
3364 * Configure queues and allocate tables now, they can be needed as
3365 * soon as the first register_netdev completes.
3366 */
3367 cfg_queues(adapter);
3368
3369 adapter->l2t = t4_init_l2t();
3370 if (!adapter->l2t) {
3371 /* We tolerate a lack of L2T, giving up some functionality */
3372 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
3373 adapter->params.offload = 0;
3374 }
3375
3376 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
3377 dev_warn(&pdev->dev, "could not allocate TID table, "
3378 "continuing\n");
3379 adapter->params.offload = 0;
3380 }
3381
3382 /*
3383 * The card is now ready to go. If any errors occur during device
3384 * registration we do not fail the whole card but rather proceed only
3385 * with the ports we manage to register successfully. However we must
3386 * register at least one net device.
3387 */
3388 for_each_port(adapter, i) {
3389 err = register_netdev(adapter->port[i]);
3390 if (err)
3391 dev_warn(&pdev->dev,
3392 "cannot register net device %s, skipping\n",
3393 adapter->port[i]->name);
3394 else {
3395 /*
3396 * Change the name we use for messages to the name of
3397 * the first successfully registered interface.
3398 */
3399 if (!adapter->registered_device_map)
3400 adapter->name = adapter->port[i]->name;
3401
3402 __set_bit(i, &adapter->registered_device_map);
3403 adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
3404 }
3405 }
3406 if (!adapter->registered_device_map) {
3407 dev_err(&pdev->dev, "could not register any net devices\n");
3408 goto out_free_dev;
3409 }
3410
3411 if (cxgb4_debugfs_root) {
3412 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
3413 cxgb4_debugfs_root);
3414 setup_debugfs(adapter);
3415 }
3416
3417 /* See what interrupts we'll be using */
3418 if (msi > 1 && enable_msix(adapter) == 0)
3419 adapter->flags |= USING_MSIX;
3420 else if (msi > 0 && pci_enable_msi(pdev) == 0)
3421 adapter->flags |= USING_MSI;
3422
3423 if (is_offload(adapter))
3424 attach_ulds(adapter);
3425
3426 print_port_info(adapter);
3427
3428sriov:
3429#ifdef CONFIG_PCI_IOV
3430 if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
3431 if (pci_enable_sriov(pdev, num_vf[func]) == 0)
3432 dev_info(&pdev->dev,
3433 "instantiated %u virtual functions\n",
3434 num_vf[func]);
3435#endif
3436 return 0;
3437
3438 out_free_dev:
3439 t4_free_mem(adapter->tids.tid_tab);
3440 t4_free_mem(adapter->l2t);
3441 for_each_port(adapter, i)
3442 if (adapter->port[i])
3443 free_netdev(adapter->port[i]);
3444 if (adapter->flags & FW_OK)
3445 t4_fw_bye(adapter, 0);
3446 out_unmap_bar:
3447 iounmap(adapter->regs);
3448 out_free_adapter:
3449 kfree(adapter);
3450 out_disable_device:
3451 pci_disable_pcie_error_reporting(pdev);
3452 pci_disable_device(pdev);
3453 out_release_regions:
3454 pci_release_regions(pdev);
3455 pci_set_drvdata(pdev, NULL);
3456 return err;
3457}
3458
3459static void __devexit remove_one(struct pci_dev *pdev)
3460{
3461 struct adapter *adapter = pci_get_drvdata(pdev);
3462
3463 pci_disable_sriov(pdev);
3464
3465 if (adapter) {
3466 int i;
3467
3468 if (is_offload(adapter))
3469 detach_ulds(adapter);
3470
3471 for_each_port(adapter, i)
3472 if (test_bit(i, &adapter->registered_device_map))
3473 unregister_netdev(adapter->port[i]);
3474
3475 if (adapter->debugfs_root)
3476 debugfs_remove_recursive(adapter->debugfs_root);
3477
3478 if (adapter->flags & FULL_INIT_DONE)
3479 cxgb_down(adapter);
3480 t4_free_mem(adapter->l2t);
3481 t4_free_mem(adapter->tids.tid_tab);
3482 disable_msi(adapter);
3483
3484 for_each_port(adapter, i)
3485 if (adapter->port[i])
3486 free_netdev(adapter->port[i]);
3487
3488 if (adapter->flags & FW_OK)
3489 t4_fw_bye(adapter, 0);
3490 iounmap(adapter->regs);
3491 kfree(adapter);
3492 pci_disable_pcie_error_reporting(pdev);
3493 pci_disable_device(pdev);
3494 pci_release_regions(pdev);
3495 pci_set_drvdata(pdev, NULL);
3496 } else if (PCI_FUNC(pdev->devfn) > 0)
3497 pci_release_regions(pdev);
3498}
3499
3500static struct pci_driver cxgb4_driver = {
3501 .name = KBUILD_MODNAME,
3502 .id_table = cxgb4_pci_tbl,
3503 .probe = init_one,
3504 .remove = __devexit_p(remove_one),
3505 .err_handler = &cxgb4_eeh,
3506};
3507
3508static int __init cxgb4_init_module(void)
3509{
3510 int ret;
3511
3512 /* Debugfs support is optional, just warn if this fails */
3513 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
3514 if (!cxgb4_debugfs_root)
3515 pr_warning("could not create debugfs entry, continuing\n");
3516
3517 ret = pci_register_driver(&cxgb4_driver);
3518 if (ret < 0)
3519 debugfs_remove(cxgb4_debugfs_root);
3520 return ret;
3521}
3522
3523static void __exit cxgb4_cleanup_module(void)
3524{
3525 pci_unregister_driver(&cxgb4_driver);
3526 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
3527}
3528
3529module_init(cxgb4_init_module);
3530module_exit(cxgb4_cleanup_module);