blob: 3d5e1a8929ae0f4785f5cfb8e4c3c6370bcf208e [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volamc7bb15a2013-03-06 20:05:05 +00002 * Copyright (C) 2005 - 2013 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070024
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000028MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_LICENSE("GPL");
30
/* Number of PCI virtual functions to enable at probe time (read-only
 * after load; 0 disables SR-IOV).
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each RX fragment buffer posted to the adapter; default 2KB. */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
/* PCI ids this driver binds to: BE2/BE3 (ServerEngines vendor id) and
 * the OneConnect/Lancer/Skyhawk family (Emulex vendor id).
 */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for the bits of the Unrecoverable Error status
 * low register; index i names bit i.  Used when logging HW UEs.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Bit names for the Unrecoverable Error status high register;
 * index i names bit i.  Trailing "Unknown" entries are reserved bits.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700121
Sathya Perla752961a2011-10-24 02:45:03 +0000122/* Is BE in a multi-channel mode */
123static inline bool be_is_mc(struct be_adapter *adapter) {
124 return (adapter->function_mode & FLEX10_MODE ||
125 adapter->function_mode & VNIC_MODE ||
126 adapter->function_mode & UMC_ENABLED);
127}
128
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700129static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130{
131 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140 u16 len, u16 entry_size)
141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
Joe Perches1f9061d22013-03-15 07:23:58 +0000149 GFP_KERNEL | __GFP_ZERO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 return 0;
153}
154
Somnath Kotur68c45a22013-03-14 02:42:07 +0000155static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156{
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
160 &reg);
161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169
Sathya Perladb3ea782011-08-22 19:41:52 +0000170 pci_write_config_dword(adapter->pdev,
171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172}
173
Somnath Kotur68c45a22013-03-14 02:42:07 +0000174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
182 if (adapter->eeh_error)
183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
Sathya Perla8788fdc2009-07-27 22:52:03 +0000190static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700191{
192 u32 val = 0;
193 val |= qid & DB_RQ_RING_ID_MASK;
194 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000195
196 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000197 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700198}
199
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000200static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
201 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202{
203 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000204 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000206
207 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000208 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700209}
210
Sathya Perla8788fdc2009-07-27 22:52:03 +0000211static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700212 bool arm, bool clear_int, u16 num_popped)
213{
214 u32 val = 0;
215 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000216 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
217 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000218
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000219 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000220 return;
221
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700222 if (arm)
223 val |= 1 << DB_EQ_REARM_SHIFT;
224 if (clear_int)
225 val |= 1 << DB_EQ_CLR_SHIFT;
226 val |= 1 << DB_EQ_EVNT_SHIFT;
227 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000228 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700229}
230
Sathya Perla8788fdc2009-07-27 22:52:03 +0000231void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700232{
233 u32 val = 0;
234 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000235 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
236 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000237
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000238 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000239 return;
240
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700241 if (arm)
242 val |= 1 << DB_CQ_REARM_SHIFT;
243 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000244 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700245}
246
/* ndo_set_mac_address handler.
 * Validates the new MAC and programs it into the adapter, with three
 * distinct paths:
 *  - BE VF: the PF owns MAC programming; we only accept the address the
 *    PF already activated and update netdev->dev_addr.
 *  - Lancer VF: query the currently active MAC so it can be deleted
 *    after the new one is added.
 *  - otherwise: add the new pmac, then delete the old one on success.
 * Returns 0 on success or a negative errno / FW status on failure.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];
	bool active_mac = true;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* For BE VF, MAC address is already activated by PF.
	 * Hence only operation left is updating netdev->devaddr.
	 * Update it if user is passing the same MAC which was used
	 * during configuring VF MAC from PF(Hypervisor).
	 */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, current_mac,
					       false, adapter->if_handle, 0);
		if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
			goto done;
		else
			goto err;	/* VF may not change its MAC */
	}

	/* Requested MAC is already the current one: nothing to program */
	if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
		goto done;

	/* For Lancer check if any MAC is active.
	 * If active, get its mac id.
	 */
	if (lancer_chip(adapter) && !be_physfn(adapter))
		be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
					 &pmac_id, 0);

	/* Add the new MAC before deleting the old one, so the interface
	 * is never left without a programmed address.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle,
				 &adapter->pmac_id[0], 0);

	if (status)
		goto err;

	if (active_mac)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				pmac_id, 0);
done:
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
300
Sathya Perlaca34fe382012-11-06 17:48:56 +0000301/* BE2 supports only v0 cmd */
302static void *hw_stats_from_cmd(struct be_adapter *adapter)
303{
304 if (BE2_chip(adapter)) {
305 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
306
307 return &cmd->hw_stats;
308 } else {
309 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
310
311 return &cmd->hw_stats;
312 }
313}
314
315/* BE2 supports only v0 cmd */
316static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
317{
318 if (BE2_chip(adapter)) {
319 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
320
321 return &hw_stats->erx;
322 } else {
323 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
324
325 return &hw_stats->erx;
326 }
327}
328
/* Copy the v0 (BE2) HW stats response into the driver's generic
 * drv_stats.  The response is byte-swapped in place (LE -> CPU) first,
 * so this must run only once per fresh FW response.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	/* per-port section for this function's port */
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address- and vlan-filtered drops separately; fold both
	 * into the single generic counter.
	 */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber events are tracked per physical port in the rxf section */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
377
/* Copy the v1 (BE3/Skyhawk) HW stats response into the driver's generic
 * drv_stats.  Byte-swaps the response in place (LE -> CPU) first.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	/* per-port section for this function's port */
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 reports a single combined filtered-drops counter */
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
423
/* Copy Lancer per-port (pport) stats into the driver's generic
 * drv_stats.  Byte-swaps the response in place (LE -> CPU) first.
 * The *_lo fields are the low 32 bits of 64-bit HW counters.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* fold address- and vlan-filtered drops into one generic counter */
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	/* NOTE(review): both fifo-overflow drv counters are fed from the
	 * same HW field on Lancer.
	 */
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000462
Sathya Perla09c1c682011-08-22 19:41:53 +0000463static void accumulate_16bit_val(u32 *acc, u16 val)
464{
465#define lo(x) (x & 0xFFFF)
466#define hi(x) (x & 0xFFFF0000)
467 bool wrapped = val < lo(*acc);
468 u32 newacc = hi(*acc) + val;
469
470 if (wrapped)
471 newacc += 65536;
472 ACCESS_ONCE(*acc) = newacc;
473}
474
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000475void populate_erx_stats(struct be_adapter *adapter,
476 struct be_rx_obj *rxo,
477 u32 erx_stat)
478{
479 if (!BEx_chip(adapter))
480 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
481 else
482 /* below erx HW counter can actually wrap around after
483 * 65535. Driver accumulates a 32-bit value
484 */
485 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
486 (u16)erx_stat);
487}
488
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000489void be_parse_stats(struct be_adapter *adapter)
490{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000491 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
492 struct be_rx_obj *rxo;
493 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000494 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000495
Sathya Perlaca34fe382012-11-06 17:48:56 +0000496 if (lancer_chip(adapter)) {
497 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000498 } else {
Sathya Perlaca34fe382012-11-06 17:48:56 +0000499 if (BE2_chip(adapter))
500 populate_be_v0_stats(adapter);
501 else
502 /* for BE3 and Skyhawk */
503 populate_be_v1_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000504
Sathya Perlaca34fe382012-11-06 17:48:56 +0000505 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
506 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000507 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
508 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe382012-11-06 17:48:56 +0000509 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000510 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000511}
512
/* ndo_get_stats64 handler: aggregate per-queue SW counters and the
 * FW-derived drv_stats into the generic rtnl stats structure.
 * Returns @stats for the caller's convenience.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* seqcount retry loop: gives a consistent snapshot of the
		 * 64-bit counters even on 32-bit hosts
		 */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* same consistent-snapshot dance for the TX counters */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
578
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000579void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700580{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700581 struct net_device *netdev = adapter->netdev;
582
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000583 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000584 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000585 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700586 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000587
588 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
589 netif_carrier_on(netdev);
590 else
591 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700592}
593
Sathya Perla3c8def92011-06-12 20:01:58 +0000594static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000595 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700596{
Sathya Perla3c8def92011-06-12 20:01:58 +0000597 struct be_tx_stats *stats = tx_stats(txo);
598
Sathya Perlaab1594e2011-07-25 19:10:15 +0000599 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000600 stats->tx_reqs++;
601 stats->tx_wrbs += wrb_cnt;
602 stats->tx_bytes += copied;
603 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700604 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000605 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000606 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700607}
608
609/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000610static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
611 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700612{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700613 int cnt = (skb->len > skb->data_len);
614
615 cnt += skb_shinfo(skb)->nr_frags;
616
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700617 /* to account for hdr wrb */
618 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000619 if (lancer_chip(adapter) || !(cnt & 1)) {
620 *dummy = false;
621 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700622 /* add a dummy to make it an even num */
623 cnt++;
624 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000625 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700626 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
627 return cnt;
628}
629
630static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
631{
632 wrb->frag_pa_hi = upper_32_bits(addr);
633 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
634 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000635 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700636}
637
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000638static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
639 struct sk_buff *skb)
640{
641 u8 vlan_prio;
642 u16 vlan_tag;
643
644 vlan_tag = vlan_tx_tag_get(skb);
645 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
646 /* If vlan priority provided by OS is NOT in available bmap */
647 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
648 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
649 adapter->recommended_prio;
650
651 return vlan_tag;
652}
653
/* Program the TX header WRB for an skb: CRC, LSO/checksum offload bits,
 * VLAN insertion, completion/event flags, WRB count and total length.
 * @wrb_cnt/@len: number of WRBs and total byte length of this request.
 * @skip_hw_vlan: request HW to NOT insert the VLAN tag (evt=1, compl=0).
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		/* TSO: hand the MSS to HW; lso6 flags IPv6 TSO (not needed
		 * on Lancer)
		 */
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* checksum offload: flag TCP or UDP as appropriate */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
688
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000689static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000690 bool unmap_single)
691{
692 dma_addr_t dma;
693
694 be_dws_le_to_cpu(wrb, sizeof(*wrb));
695
696 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000697 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000698 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000699 dma_unmap_single(dev, dma, wrb->frag_len,
700 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000701 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000702 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000703 }
704}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700705
/* Map @skb for DMA and post one header WRB plus one WRB per fragment
 * (and an optional dummy WRB to keep the total count even) on @txq.
 *
 * Returns the number of payload bytes queued, or 0 on a DMA mapping
 * failure, in which case all mappings made so far are undone and the
 * queue head is restored to the first fragment slot.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
		bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* unwind point for the dma_err path */

	/* Linear (header) part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per paged fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Pad with a zero-length WRB so the total count is even */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unmap every WRB queued so far; only the first one (if the
	 * linear part was mapped) was mapped with dma_map_single().
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
772
/* Insert the VLAN tag (and the outer QnQ tag, if configured) into the
 * packet payload itself instead of relying on HW tagging.
 *
 * Sets *skip_hw_vlan (when non-NULL) if a tag was inlined. Returns the
 * possibly re-allocated skb, or NULL if skb_share_check()/
 * __vlan_put_tag() failed (caller must treat the pkt as consumed).
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
		struct sk_buff *skb,
		bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	else if (qnq_async_evt_rcvd(adapter) && adapter->pvid)
		vlan_tag = adapter->pvid;

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;	/* tag now lives in the payload */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
809
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000810static bool be_ipv6_exthdr_check(struct sk_buff *skb)
811{
812 struct ethhdr *eh = (struct ethhdr *)skb->data;
813 u16 offset = ETH_HLEN;
814
815 if (eh->h_proto == htons(ETH_P_IPV6)) {
816 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
817
818 offset += sizeof(struct ipv6hdr);
819 if (ip6h->nexthdr != NEXTHDR_TCP &&
820 ip6h->nexthdr != NEXTHDR_UDP) {
821 struct ipv6_opt_hdr *ehdr =
822 (struct ipv6_opt_hdr *) (skb->data + offset);
823
824 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
825 if (ehdr->hdrlen == 0xff)
826 return true;
827 }
828 }
829 return false;
830}
831
832static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
833{
834 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
835}
836
Sathya Perlaee9c7992013-05-22 23:04:55 +0000837static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
838 struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000839{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000840 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000841}
842
/* Apply HW-bug workarounds to @skb before it is queued for transmit.
 *
 * May trim padded IPv4 pkts, inline VLAN tags into the payload
 * (setting *skip_hw_vlan), or drop the pkt entirely. Returns the
 * (possibly re-allocated) skb, or NULL when the pkt was dropped.
 */
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
		struct sk_buff *skb,
		bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
			VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 && vlan_tx_tag_present(skb) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
	return NULL;
}
907
/* ndo_start_xmit handler: apply workarounds, build the WRBs, stop the
 * subqueue when it is about to fill up, then ring the TX doorbell.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb)
		return NETDEV_TX_OK;	/* pkt was dropped by a workaround */

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: restore the queue head and drop */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
953
954static int be_change_mtu(struct net_device *netdev, int new_mtu)
955{
956 struct be_adapter *adapter = netdev_priv(netdev);
957 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000958 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
959 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700960 dev_info(&adapter->pdev->dev,
961 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000962 BE_MIN_MTU,
963 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700964 return -EINVAL;
965 }
966 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
967 netdev->mtu, new_mtu);
968 netdev->mtu = new_mtu;
969 return 0;
970}
971
972/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000973 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
974 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700975 */
/* Program the currently-added VLAN IDs into the HW filter table.
 * Falls back to VLAN promiscuous mode when more VIDs are configured
 * than the HW supports or when the FW command fails.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
			vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
			NULL, 0, 1, 1);
	return status;
}
1011
Patrick McHardy80d5c362013-04-19 02:04:28 +00001012static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001013{
1014 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001015 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001016
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001017 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001018 status = -EINVAL;
1019 goto ret;
1020 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001021
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001022 /* Packets with VID 0 are always received by Lancer by default */
1023 if (lancer_chip(adapter) && vid == 0)
1024 goto ret;
1025
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001026 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +00001027 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Sathya Perla10329df2012-06-05 19:37:18 +00001028 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -05001029
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001030 if (!status)
1031 adapter->vlans_added++;
1032 else
1033 adapter->vlan_tag[vid] = 0;
1034ret:
1035 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001036}
1037
Patrick McHardy80d5c362013-04-19 02:04:28 +00001038static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001039{
1040 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001041 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001042
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001043 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001044 status = -EINVAL;
1045 goto ret;
1046 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001047
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001048 /* Packets with VID 0 are always received by Lancer by default */
1049 if (lancer_chip(adapter) && vid == 0)
1050 goto ret;
1051
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001052 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +00001053 if (adapter->vlans_added <= adapter->max_vlans)
Sathya Perla10329df2012-06-05 19:37:18 +00001054 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -05001055
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001056 if (!status)
1057 adapter->vlans_added--;
1058 else
1059 adapter->vlan_tag[vid] = 1;
1060ret:
1061 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001062}
1063
/* ndo_set_rx_mode handler: sync promiscuous / multicast / unicast
 * filter state from @netdev flags and address lists into the HW,
 * degrading to a promiscuous mode whenever HW filter resources are
 * exceeded or a filter command fails.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* restore the VLAN filter table now that promisc is off */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Delete all previously-programmed secondary UC MACs ... */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* ... and go fully promiscuous if the new list won't fit */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		/* ... otherwise re-add the current UC address list */
		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1125
/* ndo_set_vf_mac handler: program @mac as the MAC address of VF @vf.
 * On Lancer the MAC is set via the MAC-list FW cmd (after removing the
 * currently-active MAC); on BEx the old PMAC entry is deleted and a
 * new one is added on the VF's interface.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						&pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* NOTE(review): the pmac_del status is overwritten by
		 * pmac_add below, so a failed delete is not reported.
		 */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					&vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
1165
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001166static int be_get_vf_config(struct net_device *netdev, int vf,
1167 struct ifla_vf_info *vi)
1168{
1169 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001170 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001171
Sathya Perla11ac75e2011-12-13 00:58:50 +00001172 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001173 return -EPERM;
1174
Sathya Perla11ac75e2011-12-13 00:58:50 +00001175 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001176 return -EINVAL;
1177
1178 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001179 vi->tx_rate = vf_cfg->tx_rate;
1180 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001181 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001182 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001183
1184 return 0;
1185}
1186
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001187static int be_set_vf_vlan(struct net_device *netdev,
1188 int vf, u16 vlan, u8 qos)
1189{
1190 struct be_adapter *adapter = netdev_priv(netdev);
1191 int status = 0;
1192
Sathya Perla11ac75e2011-12-13 00:58:50 +00001193 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001194 return -EPERM;
1195
Sathya Perla11ac75e2011-12-13 00:58:50 +00001196 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001197 return -EINVAL;
1198
1199 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001200 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1201 /* If this is new value, program it. Else skip. */
1202 adapter->vf_cfg[vf].vlan_tag = vlan;
1203
1204 status = be_cmd_set_hsw_config(adapter, vlan,
1205 vf + 1, adapter->vf_cfg[vf].if_handle);
1206 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001207 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001208 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001209 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001210 vlan = adapter->vf_cfg[vf].def_vid;
1211 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1212 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001213 }
1214
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001215
1216 if (status)
1217 dev_info(&adapter->pdev->dev,
1218 "VLAN %d config on VF %d failed\n", vlan, vf);
1219 return status;
1220}
1221
Ajit Khapardee1d18732010-07-23 01:52:13 +00001222static int be_set_vf_tx_rate(struct net_device *netdev,
1223 int vf, int rate)
1224{
1225 struct be_adapter *adapter = netdev_priv(netdev);
1226 int status = 0;
1227
Sathya Perla11ac75e2011-12-13 00:58:50 +00001228 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001229 return -EPERM;
1230
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001231 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001232 return -EINVAL;
1233
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001234 if (rate < 100 || rate > 10000) {
1235 dev_err(&adapter->pdev->dev,
1236 "tx rate must be between 100 and 10000 Mbps\n");
1237 return -EINVAL;
1238 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001239
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001240 if (lancer_chip(adapter))
1241 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1242 else
1243 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001244
1245 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001246 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001247 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001248 else
1249 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001250 return status;
1251}
1252
Sathya Perla39f1d942012-05-08 19:41:24 +00001253static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1254{
1255 struct pci_dev *dev, *pdev = adapter->pdev;
Ivan Vecera2f6a0262012-10-01 01:56:55 +00001256 int vfs = 0, assigned_vfs = 0, pos;
Sathya Perla39f1d942012-05-08 19:41:24 +00001257 u16 offset, stride;
1258
1259 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
Sathya Perlad79c0a22012-06-05 19:37:22 +00001260 if (!pos)
1261 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00001262 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1263 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1264
1265 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1266 while (dev) {
Ivan Vecera2f6a0262012-10-01 01:56:55 +00001267 if (dev->is_virtfn && pci_physfn(dev) == pdev) {
Sathya Perla39f1d942012-05-08 19:41:24 +00001268 vfs++;
1269 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1270 assigned_vfs++;
1271 }
1272 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1273 }
1274 return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1275}
1276
/* Adaptive interrupt coalescing: recompute and program the EQ delay
 * for @eqo from the recent RX packet rate of its RX queue (sampled at
 * most once per second). When adaptive coalescing is disabled, the
 * statically configured eqd is programmed instead. The FW command is
 * issued only when the computed value actually changes.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	/* EQs beyond the RX queues have no RX stats to adapt from */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Snapshot the pkt counter consistently w.r.t. the stats writer */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Scale pps into an eq-delay value, clamped to [min_eqd, max_eqd] */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1325
Sathya Perla3abcded2010-10-03 22:12:27 -07001326static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001327 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001328{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001329 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001330
Sathya Perlaab1594e2011-07-25 19:10:15 +00001331 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001332 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001333 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001334 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001335 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001336 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001337 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001338 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001339 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001340}
1341
Sathya Perla2e588f82011-03-11 02:49:26 +00001342static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001343{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001344 /* L4 checksum is not reliable for non TCP/UDP packets.
1345 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001346 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1347 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001348}
1349
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001350static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1351 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001352{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001353 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001354 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001355 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001356
Sathya Perla3abcded2010-10-03 22:12:27 -07001357 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001358 BUG_ON(!rx_page_info->page);
1359
Ajit Khaparde205859a2010-02-09 01:34:21 +00001360 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001361 dma_unmap_page(&adapter->pdev->dev,
1362 dma_unmap_addr(rx_page_info, bus),
1363 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001364 rx_page_info->last_page_user = false;
1365 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001366
1367 atomic_dec(&rxq->used);
1368 return rx_page_info;
1369}
1370
1371/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001372static void be_rx_compl_discard(struct be_rx_obj *rxo,
1373 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001374{
Sathya Perla3abcded2010-10-03 22:12:27 -07001375 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001376 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001377 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001378
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001379 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001380 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001381 put_page(page_info->page);
1382 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001383 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001384 }
1385}
1386
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 * The first fragment (or all of a tiny packet) is copied into the skb's
 * linear area; remaining fragments are attached as page frags, with frags
 * sharing one physical page coalesced into a single frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the Ethernet header into the linear area; the
		 * rest of the first fragment stays in the page as frag 0 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* The page reference (if still held) now belongs to the skb */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as frag j: drop the extra reference */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1463
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Builds an skb from the posted fragments, fills in checksum/RSS/vlan
 * metadata and hands the packet to the network stack.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb: the fragments posted for this compl must still
		 * be reclaimed, so discard instead of leaking them */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only when Rx csum offload is enabled and
	 * the completion flags say the result is reliable */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1497
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Attaches the posted page fragments directly to a napi-provided skb
 * (no copy into the linear area) and submits it via napi_gro_frags().
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb: reclaim the posted fragments */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j tracks the current frag slot; starts at -1 so the first
	 * iteration (i == 0) always opens slot 0 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as slot j: drop the extra reference */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1553
/* Decode a v1 Rx completion descriptor (used when adapter->be3_native is
 * set — see be_rx_compl_get()) into the chip-agnostic be_rx_compl_info.
 * vtm/vlan_tag are only meaningful when the vlanf bit is set.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001585
/* Decode a v0 Rx completion descriptor (used when adapter->be3_native is
 * not set — see be_rx_compl_get()) into the chip-agnostic
 * be_rx_compl_info. Field-for-field parallel of be_parse_rx_compl_v1().
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}
1617
/* Return the next valid Rx completion from rxo's CQ, or NULL if none is
 * pending. The completion is parsed into rxo->rxcp (the returned pointer
 * is only valid until the next call) and the CQ entry's valid bit is
 * cleared so it is not processed again on the next ring pass.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid bit is seen set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Ignore the tag when it matches the port's pvid but that
		 * vlan is not configured in the vlan table */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1657
Eric Dumazet1829b082011-03-01 05:48:12 +00001658static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001659{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001660 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001661
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001662 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001663 gfp |= __GFP_COMP;
1664 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001665}
1666
1667/*
1668 * Allocate a page, split it to fragments of size rx_frag_size and post as
1669 * receive buffers to BE
1670 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001671static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001672{
Sathya Perla3abcded2010-10-03 22:12:27 -07001673 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001674 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001675 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001676 struct page *pagep = NULL;
1677 struct be_eth_rx_d *rxd;
1678 u64 page_dmaaddr = 0, frag_dmaaddr;
1679 u32 posted, page_offset = 0;
1680
Sathya Perla3abcded2010-10-03 22:12:27 -07001681 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001682 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1683 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001684 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001685 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001686 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001687 break;
1688 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001689 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1690 0, adapter->big_page_size,
1691 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001692 page_info->page_offset = 0;
1693 } else {
1694 get_page(pagep);
1695 page_info->page_offset = page_offset + rx_frag_size;
1696 }
1697 page_offset = page_info->page_offset;
1698 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001699 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001700 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1701
1702 rxd = queue_head_node(rxq);
1703 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1704 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001705
1706 /* Any space left in the current big page for another frag? */
1707 if ((page_offset + rx_frag_size + rx_frag_size) >
1708 adapter->big_page_size) {
1709 pagep = NULL;
1710 page_info->last_page_user = true;
1711 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001712
1713 prev_page_info = page_info;
1714 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001715 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001716 }
1717 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001718 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001719
1720 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001721 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001722 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001723 } else if (atomic_read(&rxq->used) == 0) {
1724 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001725 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001726 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001727}
1728
/* Return the next valid Tx completion from tx_cq, or NULL when none is
 * pending. Converts the entry to CPU endianness in place and clears its
 * valid bit so the ring slot is not processed twice.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid bit is seen set */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1744
/* Reclaim the wrbs [txq->tail .. last_index] of a completed Tx request:
 * DMA-unmap the fragments and free the skb. Returns the number of wrbs
 * consumed (including the header wrb) so the caller can credit txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The first data wrb also maps the linear skb header (when
		 * present); make sure it is unmapped exactly once */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1776
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001777/* Return the number of events in the event queue */
1778static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001779{
1780 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001781 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001782
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001783 do {
1784 eqe = queue_tail_node(&eqo->q);
1785 if (eqe->evt == 0)
1786 break;
1787
1788 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001789 eqe->evt = 0;
1790 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001791 queue_tail_inc(&eqo->q);
1792 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001793
1794 return num;
1795}
1796
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001797/* Leaves the EQ is disarmed state */
1798static void be_eq_clean(struct be_eq_obj *eqo)
1799{
1800 int num = events_get(eqo);
1801
1802 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1803}
1804
/* Drain rxo's completion queue on teardown and release all posted but
 * unconsumed Rx buffers, leaving the rxq/cq empty and unarmed.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;
	u16 tail;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or on a detected HW error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1855
/* Drain all Tx completion queues on teardown: wait up to ~200ms for
 * outstanding completions, then forcibly reclaim any posted Tx requests
 * whose completions will never arrive.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* Ack the consumed completions to HW and
				 * credit back the reclaimed wrbs */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1914
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001915static void be_evt_queues_destroy(struct be_adapter *adapter)
1916{
1917 struct be_eq_obj *eqo;
1918 int i;
1919
1920 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001921 if (eqo->q.created) {
1922 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001923 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001924 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001925 be_queue_free(adapter, &eqo->q);
1926 }
1927}
1928
1929static int be_evt_queues_create(struct be_adapter *adapter)
1930{
1931 struct be_queue_info *eq;
1932 struct be_eq_obj *eqo;
1933 int i, rc;
1934
1935 adapter->num_evt_qs = num_irqs(adapter);
1936
1937 for_all_evt_queues(adapter, eqo, i) {
1938 eqo->adapter = adapter;
1939 eqo->tx_budget = BE_TX_BUDGET;
1940 eqo->idx = i;
1941 eqo->max_eqd = BE_MAX_EQD;
1942 eqo->enable_aic = true;
1943
1944 eq = &eqo->q;
1945 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1946 sizeof(struct be_eq_entry));
1947 if (rc)
1948 return rc;
1949
1950 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1951 if (rc)
1952 return rc;
1953 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00001954 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001955}
1956
Sathya Perla5fb379e2009-06-18 00:02:59 +00001957static void be_mcc_queues_destroy(struct be_adapter *adapter)
1958{
1959 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001960
Sathya Perla8788fdc2009-07-27 22:52:03 +00001961 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001962 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001963 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001964 be_queue_free(adapter, q);
1965
Sathya Perla8788fdc2009-07-27 22:52:03 +00001966 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001967 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001968 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001969 be_queue_free(adapter, q);
1970}
1971
1972/* Must be called only after TX qs are created as MCC shares TX EQ */
1973static int be_mcc_queues_create(struct be_adapter *adapter)
1974{
1975 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001976
Sathya Perla8788fdc2009-07-27 22:52:03 +00001977 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001978 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001979 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001980 goto err;
1981
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001982 /* Use the default EQ for MCC completions */
1983 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001984 goto mcc_cq_free;
1985
Sathya Perla8788fdc2009-07-27 22:52:03 +00001986 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001987 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1988 goto mcc_cq_destroy;
1989
Sathya Perla8788fdc2009-07-27 22:52:03 +00001990 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001991 goto mcc_q_free;
1992
1993 return 0;
1994
1995mcc_q_free:
1996 be_queue_free(adapter, q);
1997mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001998 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001999mcc_cq_free:
2000 be_queue_free(adapter, cq);
2001err:
2002 return -1;
2003}
2004
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002005static void be_tx_queues_destroy(struct be_adapter *adapter)
2006{
2007 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002008 struct be_tx_obj *txo;
2009 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002010
Sathya Perla3c8def92011-06-12 20:01:58 +00002011 for_all_tx_queues(adapter, txo, i) {
2012 q = &txo->q;
2013 if (q->created)
2014 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2015 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002016
Sathya Perla3c8def92011-06-12 20:01:58 +00002017 q = &txo->cq;
2018 if (q->created)
2019 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2020 be_queue_free(adapter, q);
2021 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002022}
2023
Sathya Perladafc0fe2011-10-24 02:45:02 +00002024static int be_num_txqs_want(struct be_adapter *adapter)
2025{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002026 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
2027 be_is_mc(adapter) ||
2028 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
Sathya Perlaca34fe382012-11-06 17:48:56 +00002029 BE2_chip(adapter))
Sathya Perladafc0fe2011-10-24 02:45:02 +00002030 return 1;
2031 else
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002032 return adapter->max_tx_queues;
Sathya Perladafc0fe2011-10-24 02:45:02 +00002033}
2034
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002035static int be_tx_cqs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002036{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002037 struct be_queue_info *cq, *eq;
2038 int status;
Sathya Perla3c8def92011-06-12 20:01:58 +00002039 struct be_tx_obj *txo;
2040 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002041
Sathya Perladafc0fe2011-10-24 02:45:02 +00002042 adapter->num_tx_qs = be_num_txqs_want(adapter);
Padmanabh Ratnakar3bb62f42011-11-25 05:48:06 +00002043 if (adapter->num_tx_qs != MAX_TX_QS) {
2044 rtnl_lock();
Sathya Perladafc0fe2011-10-24 02:45:02 +00002045 netif_set_real_num_tx_queues(adapter->netdev,
2046 adapter->num_tx_qs);
Padmanabh Ratnakar3bb62f42011-11-25 05:48:06 +00002047 rtnl_unlock();
2048 }
Sathya Perladafc0fe2011-10-24 02:45:02 +00002049
Sathya Perla3c8def92011-06-12 20:01:58 +00002050 for_all_tx_queues(adapter, txo, i) {
2051 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002052 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2053 sizeof(struct be_eth_tx_compl));
2054 if (status)
2055 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002056
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002057 /* If num_evt_qs is less than num_tx_qs, then more than
2058 * one txq share an eq
2059 */
2060 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2061 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2062 if (status)
2063 return status;
Sathya Perla3c8def92011-06-12 20:01:58 +00002064 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002065 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002066}
2067
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002068static int be_tx_qs_create(struct be_adapter *adapter)
2069{
2070 struct be_tx_obj *txo;
2071 int i, status;
2072
2073 for_all_tx_queues(adapter, txo, i) {
2074 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2075 sizeof(struct be_eth_wrb));
2076 if (status)
2077 return status;
2078
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00002079 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002080 if (status)
2081 return status;
2082 }
2083
Sathya Perlad3791422012-09-28 04:39:44 +00002084 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2085 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002086 return 0;
2087}
2088
2089static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002090{
2091 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002092 struct be_rx_obj *rxo;
2093 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002094
Sathya Perla3abcded2010-10-03 22:12:27 -07002095 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002096 q = &rxo->cq;
2097 if (q->created)
2098 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2099 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002100 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002101}
2102
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002103static int be_rx_cqs_create(struct be_adapter *adapter)
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002104{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002105 struct be_queue_info *eq, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002106 struct be_rx_obj *rxo;
2107 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002108
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002109 /* We'll create as many RSS rings as there are irqs.
2110 * But when there's only one irq there's no use creating RSS rings
2111 */
2112 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2113 num_irqs(adapter) + 1 : 1;
Sathya Perla7f640062012-06-05 19:37:20 +00002114 if (adapter->num_rx_qs != MAX_RX_QS) {
2115 rtnl_lock();
2116 netif_set_real_num_rx_queues(adapter->netdev,
2117 adapter->num_rx_qs);
2118 rtnl_unlock();
2119 }
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002120
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002121 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07002122 for_all_rx_queues(adapter, rxo, i) {
2123 rxo->adapter = adapter;
Sathya Perla3abcded2010-10-03 22:12:27 -07002124 cq = &rxo->cq;
2125 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2126 sizeof(struct be_eth_rx_compl));
2127 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002128 return rc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002129
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002130 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2131 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
Sathya Perla3abcded2010-10-03 22:12:27 -07002132 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002133 return rc;
Sathya Perla3abcded2010-10-03 22:12:27 -07002134 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002135
Sathya Perlad3791422012-09-28 04:39:44 +00002136 dev_info(&adapter->pdev->dev,
2137 "created %d RSS queue(s) and 1 default RX queue\n",
2138 adapter->num_rx_qs - 1);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002139 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002140}
2141
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002142static irqreturn_t be_intx(int irq, void *dev)
2143{
Sathya Perlae49cc342012-11-27 19:50:02 +00002144 struct be_eq_obj *eqo = dev;
2145 struct be_adapter *adapter = eqo->adapter;
2146 int num_evts = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002147
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002148 /* IRQ is not expected when NAPI is scheduled as the EQ
2149 * will not be armed.
2150 * But, this can happen on Lancer INTx where it takes
2151 * a while to de-assert INTx or in BE2 where occasionaly
2152 * an interrupt may be raised even when EQ is unarmed.
2153 * If NAPI is already scheduled, then counting & notifying
2154 * events will orphan them.
Sathya Perlae49cc342012-11-27 19:50:02 +00002155 */
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002156 if (napi_schedule_prep(&eqo->napi)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002157 num_evts = events_get(eqo);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002158 __napi_schedule(&eqo->napi);
2159 if (num_evts)
2160 eqo->spurious_intr = 0;
2161 }
Sathya Perlae49cc342012-11-27 19:50:02 +00002162 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002163
2164 /* Return IRQ_HANDLED only for the the first spurious intr
2165 * after a valid intr to stop the kernel from branding
2166 * this irq as a bad one!
2167 */
2168 if (num_evts || eqo->spurious_intr++ == 0)
2169 return IRQ_HANDLED;
2170 else
2171 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002172}
2173
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002174static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002175{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002176 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002177
Sathya Perla0b545a62012-11-23 00:27:18 +00002178 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2179 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002180 return IRQ_HANDLED;
2181}
2182
Sathya Perla2e588f82011-03-11 02:49:26 +00002183static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002184{
Sathya Perla2e588f82011-03-11 02:49:26 +00002185 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002186}
2187
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002188static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2189 int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002190{
Sathya Perla3abcded2010-10-03 22:12:27 -07002191 struct be_adapter *adapter = rxo->adapter;
2192 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00002193 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002194 u32 work_done;
2195
2196 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002197 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002198 if (!rxcp)
2199 break;
2200
Sathya Perla12004ae2011-08-02 19:57:46 +00002201 /* Is it a flush compl that has no data */
2202 if (unlikely(rxcp->num_rcvd == 0))
2203 goto loop_continue;
2204
2205 /* Discard compl with partial DMA Lancer B0 */
2206 if (unlikely(!rxcp->pkt_size)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002207 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002208 goto loop_continue;
Sathya Perla64642812010-12-01 01:04:17 +00002209 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00002210
Sathya Perla12004ae2011-08-02 19:57:46 +00002211 /* On BE drop pkts that arrive due to imperfect filtering in
2212 * promiscuous mode on some skews
2213 */
2214 if (unlikely(rxcp->port != adapter->port_num &&
2215 !lancer_chip(adapter))) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002216 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002217 goto loop_continue;
2218 }
2219
2220 if (do_gro(rxcp))
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002221 be_rx_compl_process_gro(rxo, napi, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002222 else
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002223 be_rx_compl_process(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002224loop_continue:
Sathya Perla2e588f82011-03-11 02:49:26 +00002225 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002226 }
2227
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002228 if (work_done) {
2229 be_cq_notify(adapter, rx_cq->id, true, work_done);
Padmanabh Ratnakar9372cac2011-11-03 01:49:55 +00002230
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002231 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2232 be_post_rx_frags(rxo, GFP_ATOMIC);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002233 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002234
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002235 return work_done;
2236}
2237
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002238static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2239 int budget, int idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002240{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002241 struct be_eth_tx_compl *txcp;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002242 int num_wrbs = 0, work_done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002243
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002244 for (work_done = 0; work_done < budget; work_done++) {
2245 txcp = be_tx_compl_get(&txo->cq);
2246 if (!txcp)
2247 break;
2248 num_wrbs += be_tx_compl_process(adapter, txo,
Sathya Perla3c8def92011-06-12 20:01:58 +00002249 AMAP_GET_BITS(struct amap_eth_tx_compl,
2250 wrb_index, txcp));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002251 }
2252
2253 if (work_done) {
2254 be_cq_notify(adapter, txo->cq.id, true, work_done);
2255 atomic_sub(num_wrbs, &txo->q.used);
2256
2257 /* As Tx wrbs have been freed up, wake up netdev queue
2258 * if it was stopped due to lack of tx wrbs. */
2259 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2260 atomic_read(&txo->q.used) < txo->q.len / 2) {
2261 netif_wake_subqueue(adapter->netdev, idx);
Sathya Perla3c8def92011-06-12 20:01:58 +00002262 }
Sathya Perla3c8def92011-06-12 20:01:58 +00002263
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002264 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2265 tx_stats(txo)->tx_compl += work_done;
2266 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2267 }
2268 return (work_done < budget); /* Done */
2269}
Sathya Perla3c8def92011-06-12 20:01:58 +00002270
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002271int be_poll(struct napi_struct *napi, int budget)
2272{
2273 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2274 struct be_adapter *adapter = eqo->adapter;
Sathya Perla0b545a62012-11-23 00:27:18 +00002275 int max_work = 0, work, i, num_evts;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002276 bool tx_done;
Sathya Perla3c8def92011-06-12 20:01:58 +00002277
Sathya Perla0b545a62012-11-23 00:27:18 +00002278 num_evts = events_get(eqo);
2279
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002280 /* Process all TXQs serviced by this EQ */
2281 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2282 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2283 eqo->tx_budget, i);
2284 if (!tx_done)
2285 max_work = budget;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002286 }
2287
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002288 /* This loop will iterate twice for EQ0 in which
2289 * completions of the last RXQ (default one) are also processed
2290 * For other EQs the loop iterates only once
2291 */
2292 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2293 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2294 max_work = max(work, max_work);
Sathya Perlaf31e50a2010-03-02 03:56:39 -08002295 }
2296
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002297 if (is_mcc_eqo(eqo))
2298 be_process_mcc(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002299
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002300 if (max_work < budget) {
2301 napi_complete(napi);
Sathya Perla0b545a62012-11-23 00:27:18 +00002302 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002303 } else {
2304 /* As we'll continue in polling mode, count and clear events */
Sathya Perla0b545a62012-11-23 00:27:18 +00002305 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
Padmanabh Ratnakar93c86702011-12-19 01:53:35 +00002306 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002307 return max_work;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002308}
2309
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002310void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002311{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002312 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2313 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002314 u32 i;
2315
Sathya Perlad23e9462012-12-17 19:38:51 +00002316 if (be_hw_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002317 return;
2318
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002319 if (lancer_chip(adapter)) {
2320 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2321 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2322 sliport_err1 = ioread32(adapter->db +
2323 SLIPORT_ERROR1_OFFSET);
2324 sliport_err2 = ioread32(adapter->db +
2325 SLIPORT_ERROR2_OFFSET);
2326 }
2327 } else {
2328 pci_read_config_dword(adapter->pdev,
2329 PCICFG_UE_STATUS_LOW, &ue_lo);
2330 pci_read_config_dword(adapter->pdev,
2331 PCICFG_UE_STATUS_HIGH, &ue_hi);
2332 pci_read_config_dword(adapter->pdev,
2333 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2334 pci_read_config_dword(adapter->pdev,
2335 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002336
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002337 ue_lo = (ue_lo & ~ue_lo_mask);
2338 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002339 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002340
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002341 /* On certain platforms BE hardware can indicate spurious UEs.
2342 * Allow the h/w to stop working completely in case of a real UE.
2343 * Hence not setting the hw_error for UE detection.
2344 */
2345 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002346 adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002347 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002348 "Error detected in the card\n");
2349 }
2350
2351 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2352 dev_err(&adapter->pdev->dev,
2353 "ERR: sliport status 0x%x\n", sliport_status);
2354 dev_err(&adapter->pdev->dev,
2355 "ERR: sliport error1 0x%x\n", sliport_err1);
2356 dev_err(&adapter->pdev->dev,
2357 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002358 }
2359
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002360 if (ue_lo) {
2361 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2362 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002363 dev_err(&adapter->pdev->dev,
2364 "UE: %s bit set\n", ue_status_low_desc[i]);
2365 }
2366 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002367
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002368 if (ue_hi) {
2369 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2370 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002371 dev_err(&adapter->pdev->dev,
2372 "UE: %s bit set\n", ue_status_hi_desc[i]);
2373 }
2374 }
2375
2376}
2377
Sathya Perla8d56ff12009-11-22 22:02:26 +00002378static void be_msix_disable(struct be_adapter *adapter)
2379{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002380 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002381 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002382 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002383 }
2384}
2385
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002386static uint be_num_rss_want(struct be_adapter *adapter)
2387{
Yuval Mintz30e80b52012-07-01 03:19:00 +00002388 u32 num = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002389
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002390 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002391 (lancer_chip(adapter) ||
2392 (!sriov_want(adapter) && be_physfn(adapter)))) {
2393 num = adapter->max_rss_queues;
Yuval Mintz30e80b52012-07-01 03:19:00 +00002394 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2395 }
2396 return num;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002397}
2398
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002399static int be_msix_enable(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002400{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002401#define BE_MIN_MSIX_VECTORS 1
Parav Pandit045508a2012-03-26 14:27:13 +00002402 int i, status, num_vec, num_roce_vec = 0;
Sathya Perlad3791422012-09-28 04:39:44 +00002403 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002404
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002405 /* If RSS queues are not used, need a vec for default RX Q */
2406 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
Parav Pandit045508a2012-03-26 14:27:13 +00002407 if (be_roce_supported(adapter)) {
2408 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2409 (num_online_cpus() + 1));
2410 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2411 num_vec += num_roce_vec;
2412 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2413 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002414 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
Sathya Perla3abcded2010-10-03 22:12:27 -07002415
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002416 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002417 adapter->msix_entries[i].entry = i;
2418
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002419 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002420 if (status == 0) {
2421 goto done;
2422 } else if (status >= BE_MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002423 num_vec = status;
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002424 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2425 num_vec);
2426 if (!status)
Sathya Perla3abcded2010-10-03 22:12:27 -07002427 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002428 }
Sathya Perlad3791422012-09-28 04:39:44 +00002429
2430 dev_warn(dev, "MSIx enable failed\n");
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002431 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2432 if (!be_physfn(adapter))
2433 return status;
2434 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002435done:
Parav Pandit045508a2012-03-26 14:27:13 +00002436 if (be_roce_supported(adapter)) {
2437 if (num_vec > num_roce_vec) {
2438 adapter->num_msix_vec = num_vec - num_roce_vec;
2439 adapter->num_msix_roce_vec =
2440 num_vec - adapter->num_msix_vec;
2441 } else {
2442 adapter->num_msix_vec = num_vec;
2443 adapter->num_msix_roce_vec = 0;
2444 }
2445 } else
2446 adapter->num_msix_vec = num_vec;
Sathya Perlad3791422012-09-28 04:39:44 +00002447 dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002448 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002449}
2450
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002451static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002452 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002453{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002454 return adapter->msix_entries[eqo->idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002455}
2456
/* Request one MSI-x irq per event queue. On a mid-loop failure, every
 * irq registered so far is released (walking backwards from the failed
 * index) and MSI-x is disabled. Returns 0 on success.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		/* Per-vector irq name, e.g. "eth0-q2" */
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: free irqs for EQs [0, i-1]; EQ i itself never got one */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2480
2481static int be_irq_register(struct be_adapter *adapter)
2482{
2483 struct net_device *netdev = adapter->netdev;
2484 int status;
2485
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002486 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002487 status = be_msix_register(adapter);
2488 if (status == 0)
2489 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002490 /* INTx is not supported for VF */
2491 if (!be_physfn(adapter))
2492 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002493 }
2494
Sathya Perlae49cc342012-11-27 19:50:02 +00002495 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002496 netdev->irq = adapter->pdev->irq;
2497 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00002498 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002499 if (status) {
2500 dev_err(&adapter->pdev->dev,
2501 "INTx request IRQ failed - err %d\n", status);
2502 return status;
2503 }
2504done:
2505 adapter->isr_registered = true;
2506 return 0;
2507}
2508
2509static void be_irq_unregister(struct be_adapter *adapter)
2510{
2511 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002512 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002513 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002514
2515 if (!adapter->isr_registered)
2516 return;
2517
2518 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002519 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002520 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002521 goto done;
2522 }
2523
2524 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002525 for_all_evt_queues(adapter, eqo, i)
2526 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002527
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002528done:
2529 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002530}
2531
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002532static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002533{
2534 struct be_queue_info *q;
2535 struct be_rx_obj *rxo;
2536 int i;
2537
2538 for_all_rx_queues(adapter, rxo, i) {
2539 q = &rxo->q;
2540 if (q->created) {
2541 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002542 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002543 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002544 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002545 }
2546}
2547
Sathya Perla889cd4b2010-05-30 23:33:45 +00002548static int be_close(struct net_device *netdev)
2549{
2550 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002551 struct be_eq_obj *eqo;
2552 int i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002553
Parav Pandit045508a2012-03-26 14:27:13 +00002554 be_roce_dev_close(adapter);
2555
Somnath Kotur04d3d622013-05-02 03:36:55 +00002556 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2557 for_all_evt_queues(adapter, eqo, i)
2558 napi_disable(&eqo->napi);
2559 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2560 }
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002561
2562 be_async_mcc_disable(adapter);
2563
2564 /* Wait for all pending tx completions to arrive so that
2565 * all tx skbs are freed.
2566 */
2567 be_tx_compl_clean(adapter);
Sathya Perlafba87552013-05-08 02:05:50 +00002568 netif_tx_disable(netdev);
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002569
2570 be_rx_qs_destroy(adapter);
2571
2572 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002573 if (msix_enabled(adapter))
2574 synchronize_irq(be_msix_vec_get(adapter, eqo));
2575 else
2576 synchronize_irq(netdev->irq);
2577 be_eq_clean(eqo);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002578 }
2579
Sathya Perla889cd4b2010-05-30 23:33:45 +00002580 be_irq_unregister(adapter);
2581
Sathya Perla482c9e72011-06-29 23:33:17 +00002582 return 0;
2583}
2584
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002585static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002586{
2587 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002588 int rc, i, j;
2589 u8 rsstable[128];
Sathya Perla482c9e72011-06-29 23:33:17 +00002590
2591 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002592 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2593 sizeof(struct be_eth_rx_d));
2594 if (rc)
2595 return rc;
2596 }
2597
2598 /* The FW would like the default RXQ to be created first */
2599 rxo = default_rxo(adapter);
2600 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2601 adapter->if_handle, false, &rxo->rss_id);
2602 if (rc)
2603 return rc;
2604
2605 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00002606 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002607 rx_frag_size, adapter->if_handle,
2608 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00002609 if (rc)
2610 return rc;
2611 }
2612
2613 if (be_multi_rxq(adapter)) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002614 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2615 for_all_rss_queues(adapter, rxo, i) {
2616 if ((j + i) >= 128)
2617 break;
2618 rsstable[j + i] = rxo->rss_id;
2619 }
2620 }
Suresh Reddy594ad542013-04-25 23:03:20 +00002621 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2622 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2623
2624 if (!BEx_chip(adapter))
2625 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2626 RSS_ENABLE_UDP_IPV6;
2627
2628 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2629 128);
2630 if (rc) {
2631 adapter->rss_flags = 0;
Sathya Perla482c9e72011-06-29 23:33:17 +00002632 return rc;
Suresh Reddy594ad542013-04-25 23:03:20 +00002633 }
Sathya Perla482c9e72011-06-29 23:33:17 +00002634 }
2635
2636 /* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002637 for_all_rx_queues(adapter, rxo, i)
Sathya Perla482c9e72011-06-29 23:33:17 +00002638 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002639 return 0;
2640}
2641
/* ndo_open handler: bring the interface up.
 *
 * Sequence: create RX queues, register IRQs, arm the RX/TX completion
 * queues, enable async MCC events, then enable NAPI and arm the event
 * queues before finally starting the TX queues.  On any failure the
 * whole bring-up is unwound via be_close() and -EIO is returned
 * (the underlying status code is not propagated).
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm completion queues so the HW starts raising events */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	/* Flag checked by be_close() so napi_disable() is never called
	 * on queues that were not napi_enable()d.
	 */
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2684
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002685static int be_setup_wol(struct be_adapter *adapter, bool enable)
2686{
2687 struct be_dma_mem cmd;
2688 int status = 0;
2689 u8 mac[ETH_ALEN];
2690
2691 memset(mac, 0, ETH_ALEN);
2692
2693 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002694 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
Joe Perches1f9061d22013-03-15 07:23:58 +00002695 GFP_KERNEL | __GFP_ZERO);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002696 if (cmd.va == NULL)
2697 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002698
2699 if (enable) {
2700 status = pci_write_config_dword(adapter->pdev,
2701 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2702 if (status) {
2703 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002704 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002705 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2706 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002707 return status;
2708 }
2709 status = be_cmd_enable_magic_wol(adapter,
2710 adapter->netdev->dev_addr, &cmd);
2711 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2712 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2713 } else {
2714 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2715 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2716 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2717 }
2718
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002719 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002720 return status;
2721}
2722
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002723/*
2724 * Generate a seed MAC address from the PF MAC Address using jhash.
2725 * MAC Address for VFs are assigned incrementally starting from the seed.
2726 * These addresses are programmed in the ASIC by the PF and the VF driver
2727 * queries for the MAC address during its probe.
2728 */
/* Program a MAC address into every VF interface.
 *
 * The seed MAC comes from be_vf_eth_addr_generate(); each subsequent VF
 * gets the last octet incremented (see the block comment above).  Lancer
 * uses the MAC-list command; BE3 adds a pmac entry on the VF's if_handle.
 * A per-VF failure is logged but does not stop the loop; the status of
 * the last attempted VF is returned.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n", vf);
		else
			/* Cache the MAC so it can be reported/cleared later */
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* NOTE(review): only the last octet is bumped, so it wraps
		 * after 255 VFs — presumably unreachable given MAX_VFS;
		 * confirm if VF counts ever grow.
		 */
		mac[5] += 1;
	}
	return status;
}
2757
Sathya Perla4c876612013-02-03 20:30:11 +00002758static int be_vfs_mac_query(struct be_adapter *adapter)
2759{
2760 int status, vf;
2761 u8 mac[ETH_ALEN];
2762 struct be_vf_cfg *vf_cfg;
2763 bool active;
2764
2765 for_all_vfs(adapter, vf_cfg, vf) {
2766 be_cmd_get_mac_from_list(adapter, mac, &active,
2767 &vf_cfg->pmac_id, 0);
2768
2769 status = be_cmd_mac_addr_query(adapter, mac, false,
2770 vf_cfg->if_handle, 0);
2771 if (status)
2772 return status;
2773 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2774 }
2775 return 0;
2776}
2777
/* Tear down all per-VF state created by be_vf_setup().
 *
 * If any VF is currently assigned to a guest, SR-IOV must stay enabled
 * (disabling it would yank the device from the VM), so only the host-side
 * bookkeeping is freed.  Otherwise SR-IOV is disabled first, then each
 * VF's MAC entry and FW interface are destroyed.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			/* Lancer: clear the VF's MAC list */
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			/* BE3: remove the pmac entry added at setup */
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2804
/* Undo everything be_setup() created, in reverse order: worker task,
 * VFs, extra unicast MACs, the FW interface, MCC/RX/TX/event queues,
 * the pmac_id table, and finally the MSI-x vectors.  Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	int i = 1;	/* pmac_id[0] is the primary MAC; extras start at 1 */

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Delete any additional unicast MACs programmed via set_rx_mode */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}
2834
Sathya Perla4c876612013-02-03 20:30:11 +00002835static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002836{
Sathya Perla4c876612013-02-03 20:30:11 +00002837 struct be_vf_cfg *vf_cfg;
2838 u32 cap_flags, en_flags, vf;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002839 int status;
2840
Sathya Perla4c876612013-02-03 20:30:11 +00002841 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2842 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002843
Sathya Perla4c876612013-02-03 20:30:11 +00002844 for_all_vfs(adapter, vf_cfg, vf) {
2845 if (!BE3_chip(adapter))
Vasundhara Volama05f99d2013-04-21 23:28:17 +00002846 be_cmd_get_profile_config(adapter, &cap_flags,
2847 NULL, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00002848
2849 /* If a FW profile exists, then cap_flags are updated */
2850 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2851 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2852 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2853 &vf_cfg->if_handle, vf + 1);
2854 if (status)
2855 goto err;
2856 }
2857err:
2858 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002859}
2860
Sathya Perla39f1d942012-05-08 19:41:24 +00002861static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002862{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002863 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002864 int vf;
2865
Sathya Perla39f1d942012-05-08 19:41:24 +00002866 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2867 GFP_KERNEL);
2868 if (!adapter->vf_cfg)
2869 return -ENOMEM;
2870
Sathya Perla11ac75e2011-12-13 00:58:50 +00002871 for_all_vfs(adapter, vf_cfg, vf) {
2872 vf_cfg->if_handle = -1;
2873 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002874 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002875 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002876}
2877
/* Create and configure all SR-IOV VFs for this PF.
 *
 * Two entry conditions: VFs may already be enabled (driver reload while
 * guests hold VFs) — in that case existing VFs are adopted and the
 * num_vfs module parameter is ignored — or VFs are enabled fresh, capped
 * at the device limit.  pci_enable_sriov() is deliberately called last,
 * so guests never probe a half-configured VF.  On any error all VF state
 * is unwound via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u16 def_vlan, lnk_speed;
	int status, old_vfs, vf;
	struct device *dev = &adapter->pdev->dev;

	old_vfs = be_find_vfs(adapter, ENABLED);
	if (old_vfs) {
		/* Adopt the already-enabled VFs */
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > adapter->dev_num_vfs)
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 adapter->dev_num_vfs, num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
		if (!adapter->num_vfs)
			return 0;	/* SR-IOV not requested: nothing to do */
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	/* Old VFs already have FW interfaces: query their ids.
	 * New VFs need their interfaces created.
	 */
	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	/* Likewise: re-read existing MACs vs. assigning fresh ones */
	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_set_qos(adapter, 1000, vf+1);

		/* Link-speed query failure is non-fatal; tx_rate stays unset */
		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		be_cmd_enable_vf(adapter, vf + 1);
	}

	/* Enable SR-IOV last so half-configured VFs are never visible */
	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
2961
Sathya Perla30128032011-11-10 19:17:57 +00002962static void be_setup_init(struct be_adapter *adapter)
2963{
2964 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002965 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002966 adapter->if_handle = -1;
2967 adapter->be3_native = false;
2968 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002969 if (be_physfn(adapter))
2970 adapter->cmd_privileges = MAX_PRIVILEGES;
2971 else
2972 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00002973}
2974
/* Determine the MAC address to use for an interface and whether it is
 * already programmed ("active") in the adapter.
 *
 * @mac:        out — the chosen MAC address
 * @active_mac: out — true if the MAC is already present in the FW, so the
 *              caller must not add it again via be_cmd_pmac_add()
 * @pmac_id:    out — pmac id of the active MAC (Lancer MAC-list path only)
 *
 * If the netdev already has a permanent MAC, that is reused.  Otherwise:
 * Lancer reads the FW MAC list; a BE3 PF queries its permanent MAC; a BE3
 * VF queries the soft MAC its PF assigned.  Returns the FW-command status.
 */
static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
			   bool *active_mac, u32 *pmac_id)
{
	int status = 0;

	if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
		/* On BE3 VFs the PF already programmed this MAC */
		if (!lancer_chip(adapter) && !be_physfn(adapter))
			*active_mac = true;
		else
			*active_mac = false;

		return status;
	}

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, mac,
						  active_mac, pmac_id, 0);
		if (*active_mac) {
			status = be_cmd_mac_addr_query(adapter, mac, false,
						       if_handle, *pmac_id);
		}
	} else if (be_physfn(adapter)) {
		/* For BE3, for PF get permanent MAC */
		status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
		*active_mac = false;
	} else {
		/* For BE3, for VF get soft MAC assigned by PF*/
		status = be_cmd_mac_addr_query(adapter, mac, false,
					       if_handle, 0);
		*active_mac = true;
	}
	return status;
}
3009
/* Populate the adapter's per-function resource limits (max MACs, VLANs,
 * queues, interface capability flags, and the device VF count).
 *
 * Skyhawk/Lancer (non-BEx) chips report a FW function profile which is
 * then clamped to driver maximums; BEx chips fall back to hard-coded
 * driver defaults (BE3 PFs may still get a TXQ count from the profile).
 */
static void be_get_resources(struct be_adapter *adapter)
{
	u16 dev_num_vfs;
	int pos, status;
	bool profile_present = false;
	u16 txq_count = 0;

	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter);
		if (!status)
			profile_present = true;
	} else if (BE3_chip(adapter) && be_physfn(adapter)) {
		be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
	}

	if (profile_present) {
		/* Sanity fixes for Lancer */
		adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
					      BE_UC_PMAC_COUNT);
		adapter->max_vlans = min_t(u16, adapter->max_vlans,
					   BE_NUM_VLANS_SUPPORTED);
		adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
					       BE_MAX_MC);
		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
					       MAX_TX_QS);
		adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
						BE3_MAX_RSS_QS);
		adapter->max_event_queues = min_t(u16,
						  adapter->max_event_queues,
						  BE3_MAX_RSS_QS);

		/* Reserve one RX queue for the non-RSS default queue */
		if (adapter->max_rss_queues &&
		    adapter->max_rss_queues == adapter->max_rx_queues)
			adapter->max_rss_queues -= 1;

		/* Each RSS queue needs its own event queue */
		if (adapter->max_event_queues < adapter->max_rss_queues)
			adapter->max_rss_queues = adapter->max_event_queues;

	} else {
		if (be_physfn(adapter))
			adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
		else
			adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

		/* Flex10 partitions the VLAN table among functions */
		if (adapter->function_mode & FLEX10_MODE)
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

		adapter->max_mcast_mac = BE_MAX_MC;
		adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
					       MAX_TX_QS);
		adapter->max_rss_queues = (adapter->be3_native) ?
					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
		adapter->max_event_queues = BE3_MAX_RSS_QS;

		adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST |
					BE_IF_FLAGS_PASS_L3L4_ERRORS |
					BE_IF_FLAGS_MCAST_PROMISCUOUS |
					BE_IF_FLAGS_VLAN_PROMISCUOUS |
					BE_IF_FLAGS_PROMISCUOUS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
			adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
	}

	/* Read the SR-IOV capability for the device's total VF count */
	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
				     &dev_num_vfs);
		if (BE3_chip(adapter))
			dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
		adapter->dev_num_vfs = dev_num_vfs;
	}
}
3088
Sathya Perla39f1d942012-05-08 19:41:24 +00003089/* Routine to query per function resource limits */
3090static int be_get_config(struct be_adapter *adapter)
3091{
Sathya Perla4c876612013-02-03 20:30:11 +00003092 int status;
Sathya Perla39f1d942012-05-08 19:41:24 +00003093
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003094 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3095 &adapter->function_mode,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003096 &adapter->function_caps,
3097 &adapter->asic_rev);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003098 if (status)
3099 goto err;
3100
3101 be_get_resources(adapter);
3102
3103 /* primary mac needs 1 pmac entry */
3104 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3105 sizeof(u32), GFP_KERNEL);
3106 if (!adapter->pmac_id) {
3107 status = -ENOMEM;
3108 goto err;
3109 }
3110
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003111err:
3112 return status;
Sathya Perla39f1d942012-05-08 19:41:24 +00003113}
3114
/* One-time device setup called from probe and after a reset/resume.
 *
 * Order is significant: query config/limits, enable MSI-x, create event
 * queues before completion queues before the MCC queue, create the FW
 * interface, resolve and (if needed) program the primary MAC, create TX
 * queues, then restore VLANs/RX-mode/flow-control, bring up VFs on a PF,
 * and finally start the periodic worker.  Any failure unwinds everything
 * via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];
	bool active_mac;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
	/* In UMC mode FW does not return right privileges.
	 * Override with correct privilege equivalent to PF.
	 */
	if (be_is_mc(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;

	/* Never enable flags the function is not capable of */
	en_flags = en_flags & adapter->if_cap_flags;

	status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	active_mac = false;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (status != 0)
		goto err;

	/* Program the MAC only if the FW does not already have it */
	if (!active_mac) {
		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status != 0)
			goto err;
	}

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	/* Re-program VLANs that survived a reset/resume */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* Sync FW flow-control settings with the driver's desired ones */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter)) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);	/* VF failures are non-fatal */
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3227
Ivan Vecera66268732011-12-08 01:31:21 +00003228#ifdef CONFIG_NET_POLL_CONTROLLER
3229static void be_netpoll(struct net_device *netdev)
3230{
3231 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003232 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00003233 int i;
3234
Sathya Perlae49cc342012-11-27 19:50:02 +00003235 for_all_evt_queues(adapter, eqo, i) {
3236 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3237 napi_schedule(&eqo->napi);
3238 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003239
3240 return;
Ivan Vecera66268732011-12-08 01:31:21 +00003241}
3242#endif
3243
/* Signature expected at the start of a UFI firmware file header */
#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
/* 32-byte cookie that marks a flash section-info directory inside the
 * UFI image (matched via memcmp in get_fsec_info()).
 * NOTE(review): has external linkage but appears to be used only in this
 * file — candidate for static const; confirm no other TU references it.
 */
char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3246
/* Decide whether the redboot (bootloader) region needs reflashing.
 *
 * Compares the CRC currently stored in flash with the last 4 bytes of
 * the new image (located at hdr_size + img_start + image_size - 4 in the
 * firmware file).  Returns true only when they differ, i.e. an update is
 * required; if the flash CRC cannot be read, the update is skipped
 * (returns false) after logging an error.
 */
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;	/* p now points at the new image's CRC */

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/*update redboot only if crc does not match*/
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}
3273
Sathya Perla306f1342011-08-02 19:57:45 +00003274static bool phy_flashing_required(struct be_adapter *adapter)
3275{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003276 return (adapter->phy.phy_type == TN_8022 &&
3277 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003278}
3279
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003280static bool is_comp_in_ufi(struct be_adapter *adapter,
3281 struct flash_section_info *fsec, int type)
3282{
3283 int i = 0, img_type = 0;
3284 struct flash_section_info_g2 *fsec_g2 = NULL;
3285
Sathya Perlaca34fe382012-11-06 17:48:56 +00003286 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003287 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3288
3289 for (i = 0; i < MAX_FLASH_COMP; i++) {
3290 if (fsec_g2)
3291 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3292 else
3293 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3294
3295 if (img_type == type)
3296 return true;
3297 }
3298 return false;
3299
3300}
3301
3302struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3303 int header_size,
3304 const struct firmware *fw)
3305{
3306 struct flash_section_info *fsec = NULL;
3307 const u8 *p = fw->data;
3308
3309 p += header_size;
3310 while (p < (fw->data + fw->size)) {
3311 fsec = (struct flash_section_info *)p;
3312 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3313 return fsec;
3314 p += 32;
3315 }
3316 return NULL;
3317}
3318
/* Write one firmware component to flash, 32KB at a time, through the
 * pre-allocated DMA command buffer in flash_cmd.
 *
 * Intermediate chunks use the SAVE op (buffer in FW); the final chunk
 * uses the FLASH op, which commits the accumulated data.  PHY firmware
 * has its own op codes, and ILLEGAL_IOCTL_REQ on the PHY path is treated
 * as "no PHY flash present" rather than an error.  Returns 0 on success
 * or the failing FW status.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		/* 32KB is the per-command payload limit */
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Last chunk commits (FLASH); earlier chunks buffer (SAVE) */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
						flash_op, num_bytes);
		if (status) {
			/* PHY flash absent: not a failure, just stop */
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}
3359
/* For BE2, BE3 and BE3-R */
/* Flash every component found in the UFI onto a BE2/BE3/BE3-R adapter.
 * The per-generation tables below map each image type to its flash
 * offset, max size and flashrom operation type.  Components absent from
 * the UFI's section table are skipped.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd,
			 int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	/* Flash layout for gen3 (BE3) adapters */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	/* Flash layout for gen2 (BE2) adapters */
	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW flashing needs FW version 3.102.148.0 or later */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* Only flash redboot when the image differs from what is
		 * already on the card.
		 */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* Reject components that run past the end of the FW file */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
					pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
3469
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003470static int be_flash_skyhawk(struct be_adapter *adapter,
3471 const struct firmware *fw,
3472 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003473{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003474 int status = 0, i, filehdr_size = 0;
3475 int img_offset, img_size, img_optype, redboot;
3476 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3477 const u8 *p = fw->data;
3478 struct flash_section_info *fsec = NULL;
3479
3480 filehdr_size = sizeof(struct flash_file_hdr_g3);
3481 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3482 if (!fsec) {
3483 dev_err(&adapter->pdev->dev,
3484 "Invalid Cookie. UFI corrupted ?\n");
3485 return -1;
3486 }
3487
3488 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3489 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3490 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3491
3492 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3493 case IMAGE_FIRMWARE_iSCSI:
3494 img_optype = OPTYPE_ISCSI_ACTIVE;
3495 break;
3496 case IMAGE_BOOT_CODE:
3497 img_optype = OPTYPE_REDBOOT;
3498 break;
3499 case IMAGE_OPTION_ROM_ISCSI:
3500 img_optype = OPTYPE_BIOS;
3501 break;
3502 case IMAGE_OPTION_ROM_PXE:
3503 img_optype = OPTYPE_PXE_BIOS;
3504 break;
3505 case IMAGE_OPTION_ROM_FCoE:
3506 img_optype = OPTYPE_FCOE_BIOS;
3507 break;
3508 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3509 img_optype = OPTYPE_ISCSI_BACKUP;
3510 break;
3511 case IMAGE_NCSI:
3512 img_optype = OPTYPE_NCSI_FW;
3513 break;
3514 default:
3515 continue;
3516 }
3517
3518 if (img_optype == OPTYPE_REDBOOT) {
3519 redboot = be_flash_redboot(adapter, fw->data,
3520 img_offset, img_size,
3521 filehdr_size + img_hdrs_size);
3522 if (!redboot)
3523 continue;
3524 }
3525
3526 p = fw->data;
3527 p += filehdr_size + img_offset + img_hdrs_size;
3528 if (p + img_size > fw->data + fw->size)
3529 return -1;
3530
3531 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3532 if (status) {
3533 dev_err(&adapter->pdev->dev,
3534 "Flashing section type %d failed.\n",
3535 fsec->fsec_entry[i].type);
3536 return status;
3537 }
3538 }
3539 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003540}
3541
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003542static int lancer_wait_idle(struct be_adapter *adapter)
3543{
3544#define SLIPORT_IDLE_TIMEOUT 30
3545 u32 reg_val;
3546 int status = 0, i;
3547
3548 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3549 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3550 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3551 break;
3552
3553 ssleep(1);
3554 }
3555
3556 if (i == SLIPORT_IDLE_TIMEOUT)
3557 status = -1;
3558
3559 return status;
3560}
3561
3562static int lancer_fw_reset(struct be_adapter *adapter)
3563{
3564 int status = 0;
3565
3566 status = lancer_wait_idle(adapter);
3567 if (status)
3568 return status;
3569
3570 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3571 PHYSDEV_CONTROL_OFFSET);
3572
3573 return status;
3574}
3575
/* Download a FW image to a Lancer adapter.  The image is streamed to the
 * "/prg" object in 32KB chunks via WRITE_OBJECT cmds, then committed with
 * a zero-length write.  Depending on the FW's change_status, the chip is
 * either reset here to activate the new image or the user is told a
 * reboot is needed.  Returns 0 on success, negative errno/cmd status
 * otherwise.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* FW rejects images whose length is not a multiple of 4 bytes */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One DMA buffer holds the cmd header plus one 32KB data chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* Advance by what the FW actually consumed, which may be
		 * less than chunk_size.
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_fw_reset(adapter);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3670
/* UFI file flavours, matched against the adapter generation below */
#define UFI_TYPE2		2
#define UFI_TYPE3		3
#define UFI_TYPE3R		10
#define UFI_TYPE4		4
/* Classify the UFI file by matching its build string (and, for BE3,
 * the asic_type_rev field that distinguishes BE3-R UFIs) against the
 * adapter's chip generation.  Returns a UFI_TYPE* value, or -1 when
 * the UFI does not match this adapter.
 */
static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g3 *fhdr)
{
	if (fhdr == NULL)
		goto be_get_ufi_exit;

	if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
		return UFI_TYPE4;
	else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
		if (fhdr->asic_type_rev == 0x10)
			return UFI_TYPE3R;
		else
			return UFI_TYPE3;
	} else if (BE2_chip(adapter) && fhdr->build[0] == '2')
		return UFI_TYPE2;

be_get_ufi_exit:
	dev_err(&adapter->pdev->dev,
		"UFI and Interface are not compatible for flashing\n");
	return -1;
}
3696
/* Flash a (non-Lancer) UFI file: classify the UFI, then dispatch to the
 * Skyhawk or BEx flashing routine per image header.  UFI_TYPE2 files have
 * no image headers and are flashed directly.  Returns 0 on success.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	/* DMA buffer reused for every flashrom cmd issued below */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		/* imageid == 1 marks the flashable image descriptor */
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							&flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* Gen2 UFIs carry no image headers; flash them as a whole */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3765
3766int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3767{
3768 const struct firmware *fw;
3769 int status;
3770
3771 if (!netif_running(adapter->netdev)) {
3772 dev_err(&adapter->pdev->dev,
3773 "Firmware load not allowed (interface is down)\n");
3774 return -1;
3775 }
3776
3777 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3778 if (status)
3779 goto fw_exit;
3780
3781 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3782
3783 if (lancer_chip(adapter))
3784 status = lancer_fw_download(adapter, fw);
3785 else
3786 status = be_fw_download(adapter, fw);
3787
Ajit Khaparde84517482009-09-04 03:12:16 +00003788fw_exit:
3789 release_firmware(fw);
3790 return status;
3791}
3792
/* net_device callbacks exported to the networking core; includes the
 * SR-IOV per-VF configuration hooks (mac/vlan/tx-rate/config).
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};
3812
/* Initialize the net_device: advertise offload features, hook up the
 * netdev/ethtool ops and register one NAPI context per event queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	/* RX flow hashing only makes sense with multiple RX queues */
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3844
3845static void be_unmap_pci_bars(struct be_adapter *adapter)
3846{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00003847 if (adapter->csr)
3848 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003849 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00003850 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00003851}
3852
/* Doorbell BAR number: 0 on Lancer and on VFs, 4 on other PFs */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
3860
3861static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00003862{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00003863 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00003864 adapter->roce_db.size = 4096;
3865 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3866 db_bar(adapter));
3867 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3868 db_bar(adapter));
3869 }
Parav Pandit045508a2012-03-26 14:27:13 +00003870 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003871}
3872
/* Map the PCI BARs used by the driver: CSR (BE2/BE3 PFs only), the
 * doorbell BAR, and the Skyhawk RoCE doorbell region.  Also latches the
 * SLI interface type from config space.  Returns 0 or -ENOMEM.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

	/* Only BE2/BE3 physical functions expose the CSR BAR (BAR 2) */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	/* also releases the CSR mapping taken above, if any */
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
3900
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003901static void be_ctrl_cleanup(struct be_adapter *adapter)
3902{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003903 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003904
3905 be_unmap_pci_bars(adapter);
3906
3907 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003908 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3909 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003910
Sathya Perla5b8821b2011-08-02 19:57:44 +00003911 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003912 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003913 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3914 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003915}
3916
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003917static int be_ctrl_init(struct be_adapter *adapter)
3918{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003919 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3920 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00003921 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perlace66f782012-11-06 17:48:58 +00003922 u32 sli_intf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003923 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003924
Sathya Perlace66f782012-11-06 17:48:58 +00003925 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3926 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3927 SLI_INTF_FAMILY_SHIFT;
3928 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3929
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003930 status = be_map_pci_bars(adapter);
3931 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00003932 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003933
3934 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003935 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3936 mbox_mem_alloc->size,
3937 &mbox_mem_alloc->dma,
3938 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003939 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003940 status = -ENOMEM;
3941 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003942 }
3943 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3944 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3945 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3946 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00003947
Sathya Perla5b8821b2011-08-02 19:57:44 +00003948 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3949 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
Joe Perches1f9061d22013-03-15 07:23:58 +00003950 &rx_filter->dma,
3951 GFP_KERNEL | __GFP_ZERO);
Sathya Perla5b8821b2011-08-02 19:57:44 +00003952 if (rx_filter->va == NULL) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003953 status = -ENOMEM;
3954 goto free_mbox;
3955 }
Joe Perches1f9061d22013-03-15 07:23:58 +00003956
Ivan Vecera29849612010-12-14 05:43:19 +00003957 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003958 spin_lock_init(&adapter->mcc_lock);
3959 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003960
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07003961 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00003962 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003963 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003964
3965free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003966 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3967 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003968
3969unmap_pci_bars:
3970 be_unmap_pci_bars(adapter);
3971
3972done:
3973 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003974}
3975
3976static void be_stats_cleanup(struct be_adapter *adapter)
3977{
Sathya Perla3abcded2010-10-03 22:12:27 -07003978 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003979
3980 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003981 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3982 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003983}
3984
3985static int be_stats_init(struct be_adapter *adapter)
3986{
Sathya Perla3abcded2010-10-03 22:12:27 -07003987 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003988
Sathya Perlaca34fe382012-11-06 17:48:56 +00003989 if (lancer_chip(adapter))
3990 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3991 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003992 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Sathya Perlaca34fe382012-11-06 17:48:56 +00003993 else
3994 /* BE3 and Skyhawk */
3995 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3996
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003997 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
Joe Perches1f9061d22013-03-15 07:23:58 +00003998 GFP_KERNEL | __GFP_ZERO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003999 if (cmd->va == NULL)
4000 return -1;
4001 return 0;
4002}
4003
/* PCI remove callback.  Teardown order matters: the RoCE consumer and
 * interrupts go first, the recovery worker is cancelled before the
 * netdev is unregistered, and the FW is told we are done with cmds
 * before the control path is torn down.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4035
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004036bool be_is_wol_supported(struct be_adapter *adapter)
4037{
4038 return ((adapter->wol_cap & BE_WOL_CAP) &&
4039 !be_is_wol_excluded(adapter)) ? true : false;
4040}
4041
/* Query the FW's UART trace level via the extended-FAT capabilities cmd.
 * Returns the configured level, or 0 on Lancer (not supported there) or
 * on any failure (allocation or cmd error).
 */
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	if (lancer_chip(adapter))
		return 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		/* Config params follow the response header in the buffer */
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		/* Pick the trace level configured for the UART mode */
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004078
/* Fetch one-time configuration from the FW during probe: controller
 * attributes, Wake-on-LAN capability and the FW UART log level.
 *
 * Returns 0 on success or a negative error code from the mandatory
 * attribute query; WOL-capability query failures are tolerated.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	/* mirror the FW's UART trace level into the netdev msg_enable mask */
	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
4107
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004108static int lancer_recover_func(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004109{
4110 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004111
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004112 status = lancer_test_and_set_rdy_state(adapter);
4113 if (status)
4114 goto err;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004115
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004116 if (netif_running(adapter->netdev))
4117 be_close(adapter->netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004118
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004119 be_clear(adapter);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004120
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004121 adapter->hw_error = false;
4122 adapter->fw_timeout = false;
4123
4124 status = be_setup(adapter);
4125 if (status)
4126 goto err;
4127
4128 if (netif_running(adapter->netdev)) {
4129 status = be_open(adapter->netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004130 if (status)
4131 goto err;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004132 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004133
4134 dev_err(&adapter->pdev->dev,
4135 "Adapter SLIPORT recovery succeeded\n");
4136 return 0;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004137err:
Padmanabh Ratnakar67297ad2012-10-20 06:02:27 +00004138 if (adapter->eeh_error)
4139 dev_err(&adapter->pdev->dev,
4140 "Adapter SLIPORT recovery failed\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004141
4142 return status;
4143}
4144
/* Periodic (1 s) worker: polls for adapter errors and, on Lancer chips,
 * attempts SLIPORT recovery. Always reschedules itself.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* EEH errors are recovered via the PCI error handlers,
		 * not here */
		if (adapter->eeh_error)
			goto out;

		/* detach under rtnl so the stack stops using the device
		 * while we rebuild it */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);

		/* re-attach only after a successful recovery */
		if (!status)
			netif_device_attach(adapter->netdev);
	}

out:
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
}
4172
/* Periodic (1 s) housekeeping worker: reaps MCC completions while the
 * interface is down, issues stats and die-temperature queries,
 * replenishes starved RX rings and updates adaptive EQ delays.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* fire a new stats command only after the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* query temperature once every be_get_temp_freq (64) iterations */
	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* refill any RX queue that ran out of posted buffers */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	/* adapt event-queue interrupt delay to the current load */
	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4215
Sathya Perla39f1d942012-05-08 19:41:24 +00004216static bool be_reset_required(struct be_adapter *adapter)
4217{
Sathya Perlad79c0a22012-06-05 19:37:22 +00004218 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004219}
4220
Sathya Perlad3791422012-09-28 04:39:44 +00004221static char *mc_name(struct be_adapter *adapter)
4222{
4223 if (adapter->function_mode & FLEX10_MODE)
4224 return "FLEX10";
4225 else if (adapter->function_mode & VNIC_MODE)
4226 return "vNIC";
4227 else if (adapter->function_mode & UMC_ENABLED)
4228 return "UMC";
4229 else
4230 return "";
4231}
4232
/* Identify this PCI function as a PF or a VF for log messages. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
4237
/* PCI probe: bring up one NIC function.
 *
 * Sequence: enable PCI and claim BARs, allocate the netdev, configure
 * DMA masks, set up the mailbox/control path, wait for FW readiness,
 * optionally FLR the function, initialize stats and queues, and finally
 * register the net device. Any failure unwinds everything acquired so
 * far through the goto-cleanup chain at the bottom.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to a 32-bit mask if unavailable */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (status < 0) {
			dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
			goto free_netdev;
		}
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is optional; log and continue if it cannot be enabled */
	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	/* start the 1 s error-detection/recovery poller */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4363
/* Legacy PM suspend: arm WOL if enabled, stop the recovery worker,
 * close the interface and release HW resources, then save PCI state
 * and power the device down. Always returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* make sure the recovery worker is not running during teardown */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4387
/* Legacy PM resume: re-enable the device, restore PCI state,
 * re-initialize the FW command path, rebuild driver resources, re-open
 * the interface if it was running, restart the recovery worker and
 * disarm WOL. Returns 0 on success or a negative error code.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup() return value is ignored here, unlike
	 * in probe/EEH paths — confirm that is intentional */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
4424
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	/* quiesce both periodic workers before resetting the function */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* function reset halts all DMA from the device (see note above) */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4444
/* EEH/AER callback: a PCI channel error was detected. Flag the EEH
 * error, stop the recovery worker, detach and close the interface and
 * release resources. Returns DISCONNECT on permanent failure, else
 * NEED_RESET to request a slot reset.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* recovery ownership moves to the EEH handlers from here on */
	adapter->eeh_error = true;

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4484
/* EEH/AER callback: the slot has been reset. Clear the driver's error
 * state, re-enable the device, restore PCI config and wait for the FW
 * to become ready again. Returns RECOVERED on success, DISCONNECT on
 * any failure.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* clear the AER uncorrectable-error status left by the fault */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}
4511
/* EEH/AER callback: traffic may resume. Reset and re-initialize the
 * function, rebuild resources, re-open the interface if it was running,
 * restart the recovery worker and re-attach the netdev. Failures are
 * only logged — there is no further recovery from here.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4548
/* PCI error-recovery (EEH/AER) callbacks for this driver */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4554
/* PCI driver descriptor: probe/remove, legacy PM hooks, shutdown and
 * EEH error handlers */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4565
4566static int __init be_init_module(void)
4567{
Joe Perches8e95a202009-12-03 07:58:21 +00004568 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4569 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004570 printk(KERN_WARNING DRV_NAME
4571 " : Module param rx_frag_size must be 2048/4096/8192."
4572 " Using 2048\n");
4573 rx_frag_size = 2048;
4574 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004575
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004576 return pci_register_driver(&be_driver);
4577}
4578module_init(be_init_module);
4579
/* Module exit: unregister the PCI driver */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);