/*
 * Copyright (C) 2005 - 2016 Broadcom
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

/* Per-module error detection/recovery workq shared across all functions.
 * Each function schedules its own work request on this shared workq.
 */
static struct workqueue_struct *be_err_recovery_workq;

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* Workqueue used by all functions for deferring cmd calls to the adapter */
static struct workqueue_struct *be_wq;

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};

#define BE_VF_IF_EN_FLAGS	(BE_IF_FLAGS_UNTAGGED | \
				 BE_IF_FLAGS_BROADCAST | \
				 BE_IF_FLAGS_MULTICAST | \
				 BE_IF_FLAGS_PASS_L3L4_ERRORS)

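/* Ring (queue) memory helpers: each ring is backed by a single coherent
 * DMA buffer of len * entry_size bytes, allocated against and freed via
 * the PCI device so the adapter can DMA descriptors directly.
 */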
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (be_check_error(adapter, BE_ERROR_EEH))
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

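/* Program @mac as the active (index 0) MAC filter. If the same address is
 * already present in the uc-list, its pmac_id is reused rather than asking
 * the FW to add a duplicate filter.
 */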
static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
{
	int i;

	/* Check if mac has already been added as part of uc-list */
	for (i = 0; i < adapter->uc_macs; i++) {
		if (ether_addr_equal((u8 *)&adapter->uc_list[i * ETH_ALEN],
				     mac)) {
			/* mac already added, skip addition */
			adapter->pmac_id[0] = adapter->pmac_id[i + 1];
			return 0;
		}
	}

	return be_cmd_pmac_add(adapter, mac, adapter->if_handle,
			       &adapter->pmac_id[0], 0);
}

static void be_dev_mac_del(struct be_adapter *adapter, int pmac_id)
{
	int i;

	/* Skip deletion if the programmed mac is
	 * being used in uc-list
	 */
	for (i = 0; i < adapter->uc_macs; i++) {
		if (adapter->pmac_id[i + 1] == pmac_id)
			return;
	}
	be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
		return 0;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if the PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK only if the PF programmed
	 * the MAC for the VF.
	 */
	mutex_lock(&adapter->rx_filter_lock);
	status = be_dev_mac_add(adapter, (u8 *)addr->sa_data);
	if (!status) {
		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_dev_mac_del(adapter, old_pmac_id);
	}

	mutex_unlock(&adapter->rx_filter_lock);
	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, adapter->pmac_id[0], mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or because the PF didn't pre-provision it.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}
done:
	ether_addr_copy(adapter->dev_mac, addr->sa_data);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

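/* Fold a 16-bit HW counter that wraps at 65535 into a 32-bit SW value:
 * a reading smaller than the low half of the accumulator implies a wrap,
 * so 65536 is carried into the high half. For example, acc 0x0001fffe
 * followed by val 0x0003 yields 0x00020003.
 */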
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)	(x & 0xFFFF)
#define hi(x)	(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);

	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}

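/* Number of header bytes that TSO replicates in front of every segment;
 * for encapsulated pkts the inner (post-tunnel) headers are the ones
 * that get duplicated.
 */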
static int be_gso_hdr_len(struct sk_buff *skb)
{
	if (skb->encapsulation)
		return skb_inner_transport_offset(skb) +
		       inner_tcp_hdrlen(skb);
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);
	u32 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
	/* Account for headers which get duplicated in TSO pkt */
	u32 dup_hdr_len = tx_pkts > 1 ? be_gso_hdr_len(skb) * (tx_pkts - 1) : 0;

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len + dup_hdr_len;
	stats->tx_pkts += tx_pkts;
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		stats->tx_vxlan_offload_pkts += tx_pkts;
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}

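/* Rewrite the 802.1p priority bits of the tag when the OS-chosen priority
 * is not set in the adapter's allowed-priority bitmap, substituting the
 * FW-recommended priority bits instead.
 */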
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio_bits;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

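/* TX queue watermarks: the queue counts as full when it can no longer fit
 * a maximally fragmented skb (BE_MAX_TX_FRAG_COUNT WRBs); it may be woken
 * only after draining below half capacity, giving hysteresis between the
 * stop and wake thresholds.
 */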
static inline bool be_is_txq_full(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
}

static inline bool be_can_txq_wake(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) < txo->q.len / 2;
}

static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
}

static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}

static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
		(u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}

/* Grab a WRB header for xmit */
static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
	u32 head = txo->q.head;

	queue_head_inc(&txo->q);
	return head;
}

/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}

/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	queue_head_inc(txq);
}

/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u32 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	txq->head = head;
}

/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	bool map_single = false;
	u32 head = txq->head;
	dma_addr_t busaddr;
	int len;

	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}

static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}

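/* Detect IPv6 pkts whose first extension header carries hdrlen == 0xff;
 * these are the pkts that can trigger the TX stall worked around in
 * be_lancer_xmit_workarounds() when HW VLAN tagging is requested on BE3
 * (see be_ipv6_tx_stall_chk() below).
 */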
static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

Vasundhara Volamec495fa2014-03-03 14:25:38 +05301109static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
1110 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301111 struct be_wrb_params
1112 *wrb_params)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001113{
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001114 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001115 unsigned int eth_hdr_len;
1116 struct iphdr *ip;
Somnath Kotur93040ae2012-06-26 22:32:10 +00001117
Ajit Khaparde1297f9d2013-04-24 11:52:28 +00001118 /* For padded packets, BE HW modifies tot_len field in IP header
1119 * incorrecly when VLAN tag is inserted by HW.
Somnath Kotur3904dcc2013-05-26 21:09:06 +00001120 * For padded packets, Lancer computes incorrect checksum.
Ajit Khaparde1ded1322011-12-09 13:53:17 +00001121 */
Sathya Perlaee9c7992013-05-22 23:04:55 +00001122 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
1123 VLAN_ETH_HLEN : ETH_HLEN;
Somnath Kotur3904dcc2013-05-26 21:09:06 +00001124 if (skb->len <= 60 &&
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001125 (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
Sathya Perlaee9c7992013-05-22 23:04:55 +00001126 is_ipv4_pkt(skb)) {
Somnath Kotur93040ae2012-06-26 22:32:10 +00001127 ip = (struct iphdr *)ip_hdr(skb);
1128 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
1129 }
1130
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001131 /* If vlan tag is already inlined in the packet, skip HW VLAN
Vasundhara Volamf93f1602014-02-12 16:09:25 +05301132 * tagging in pvid-tagging mode
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001133 */
Vasundhara Volamf93f1602014-02-12 16:09:25 +05301134 if (be_pvid_tagging_enabled(adapter) &&
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001135 veh->h_vlan_proto == htons(ETH_P_8021Q))
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301136 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001137
Somnath Kotur93040ae2012-06-26 22:32:10 +00001138 /* HW has a bug wherein it will calculate CSUM for VLAN
1139	 * pkts even when CSUM offload is disabled.
1140	 * Workaround: manually insert the VLAN tag in the pkt.
1141 */
1142 if (skb->ip_summed != CHECKSUM_PARTIAL &&
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001143 skb_vlan_tag_present(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301144 skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001145 if (unlikely(!skb))
Vasundhara Volamc9128952014-03-03 14:25:07 +05301146 goto err;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001147 }
1148
1149	/* HW may lock up when VLAN HW tagging is requested on
1150 * certain ipv6 packets. Drop such pkts if the HW workaround to
1151 * skip HW tagging is not enabled by FW.
1152 */
1153 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
Kalesh APcd3307aa2014-09-19 15:47:02 +05301154 (adapter->pvid || adapter->qnq_vid) &&
1155 !qnq_async_evt_rcvd(adapter)))
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001156 goto tx_drop;
1157
1158	/* Manually insert the VLAN tag to prevent an ASIC lockup
1159	 * that occurs when the ASIC inserts a VLAN tag into
1160	 * certain ipv6 packets. Insert VLAN tags in the driver,
1161	 * and set the event, completion and vlan bits accordingly
1162 * in the Tx WRB.
1163 */
1164 if (be_ipv6_tx_stall_chk(adapter, skb) &&
1165 be_vlan_tag_tx_chk(adapter, skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301166 skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
Ajit Khaparde1ded1322011-12-09 13:53:17 +00001167 if (unlikely(!skb))
Vasundhara Volamc9128952014-03-03 14:25:07 +05301168 goto err;
Ajit Khaparde1ded1322011-12-09 13:53:17 +00001169 }
1170
Sathya Perlaee9c7992013-05-22 23:04:55 +00001171 return skb;
1172tx_drop:
1173 dev_kfree_skb_any(skb);
Vasundhara Volamc9128952014-03-03 14:25:07 +05301174err:
Sathya Perlaee9c7992013-05-22 23:04:55 +00001175 return NULL;
1176}
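/* Worked example of the pskb_trim() above, with illustrative sizes and
 * assuming a Lancer NIC: a 28-byte IPv4/UDP datagram (20B IP hdr + 8B
 * UDP hdr) is padded by the stack to the 60-byte ethernet minimum, so
 * skb->len == 60 while ip->tot_len == 28. Trimming to eth_hdr_len +
 * ntohs(ip->tot_len) == 14 + 28 == 42 drops the pad bytes, letting HW
 * re-pad without mangling tot_len or the checksum.
 */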
1177
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301178static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1179 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301180 struct be_wrb_params *wrb_params)
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301181{
ajit.khaparde@broadcom.com127bfce2016-02-23 00:35:01 +05301182 int err;
1183
Suresh Reddy8227e992015-10-12 03:47:19 -04001184 /* Lancer, SH and BE3 in SRIOV mode have a bug wherein
1185	 * packets that are 32 bytes or less may cause a transmit stall
1186	 * on that port. The workaround is to pad such packets
1187	 * (len <= 32 bytes) to a minimum length of 36 bytes.
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301188 */
Suresh Reddy8227e992015-10-12 03:47:19 -04001189 if (skb->len <= 32) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001190 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301191 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301192 }
1193
1194 if (BEx_chip(adapter) || lancer_chip(adapter)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301195 skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301196 if (!skb)
1197 return NULL;
1198 }
1199
ajit.khaparde@broadcom.com127bfce2016-02-23 00:35:01 +05301200 /* The stack can send us skbs with length greater than
1201 * what the HW can handle. Trim the extra bytes.
1202 */
1203 WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
1204 err = pskb_trim(skb, BE_MAX_GSO_SIZE);
1205 WARN_ON(err);
1206
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301207 return skb;
1208}
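/* Order of the fixups above: pad first (<= 32-byte TX-stall bug), then
 * the BEx/Lancer VLAN and csum workarounds, then trim oversized skbs to
 * BE_MAX_GSO_SIZE. E.g. a 30-byte frame is padded to 36 bytes before
 * be_lancer_xmit_workarounds() ever sees it.
 */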
1209
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001210static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
1211{
1212 struct be_queue_info *txq = &txo->q;
1213 struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);
1214
1215 /* Mark the last request eventable if it hasn't been marked already */
1216 if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
1217 hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);
1218
1219	/* compose a dummy wrb if there is an odd number of wrbs to notify */
1220 if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
Sathya Perlaf986afc2015-02-06 08:18:43 -05001221 wrb_fill_dummy(queue_head_node(txq));
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001222 queue_head_inc(txq);
1223 atomic_inc(&txq->used);
1224 txo->pend_wrb_cnt++;
1225 hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
1226 TX_HDR_WRB_NUM_SHIFT);
1227 hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
1228 TX_HDR_WRB_NUM_SHIFT);
1229 }
1230 be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
1231 txo->pend_wrb_cnt = 0;
1232}
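/* Illustration of the dummy-wrb path above (non-Lancer chips only): a
 * flush with pend_wrb_cnt == 3 composes one dummy WRB so the doorbell
 * rings with an even count of 4, and the TX_HDR_WRB_NUM field in the
 * last request's header is bumped from last_req_wrb_cnt to
 * last_req_wrb_cnt + 1 to account for it.
 */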
1233
Venkata Duvvuru760c295e2015-05-13 13:00:14 +05301234/* OS2BMC related */
1235
1236#define DHCP_CLIENT_PORT 68
1237#define DHCP_SERVER_PORT 67
1238#define NET_BIOS_PORT1 137
1239#define NET_BIOS_PORT2 138
1240#define DHCPV6_RAS_PORT 547
1241
1242#define is_mc_allowed_on_bmc(adapter, eh) \
1243 (!is_multicast_filt_enabled(adapter) && \
1244 is_multicast_ether_addr(eh->h_dest) && \
1245 !is_broadcast_ether_addr(eh->h_dest))
1246
1247#define is_bc_allowed_on_bmc(adapter, eh) \
1248 (!is_broadcast_filt_enabled(adapter) && \
1249 is_broadcast_ether_addr(eh->h_dest))
1250
1251#define is_arp_allowed_on_bmc(adapter, skb) \
1252 (is_arp(skb) && is_arp_filt_enabled(adapter))
1253
1254#define is_broadcast_packet(eh, adapter) \
1255 (is_multicast_ether_addr(eh->h_dest) && \
1256 !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))
1257
1258#define is_arp(skb) (skb->protocol == htons(ETH_P_ARP))
1259
1260#define is_arp_filt_enabled(adapter) \
1261 (adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))
1262
1263#define is_dhcp_client_filt_enabled(adapter) \
1264 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)
1265
1266#define is_dhcp_srvr_filt_enabled(adapter) \
1267 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)
1268
1269#define is_nbios_filt_enabled(adapter) \
1270 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)
1271
1272#define is_ipv6_na_filt_enabled(adapter) \
1273 (adapter->bmc_filt_mask & \
1274 BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)
1275
1276#define is_ipv6_ra_filt_enabled(adapter) \
1277 (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)
1278
1279#define is_ipv6_ras_filt_enabled(adapter) \
1280 (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)
1281
1282#define is_broadcast_filt_enabled(adapter) \
1283 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST)
1284
1285#define is_multicast_filt_enabled(adapter) \
1286 (adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
1287
1288static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
1289 struct sk_buff **skb)
1290{
1291 struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
1292 bool os2bmc = false;
1293
1294 if (!be_is_os2bmc_enabled(adapter))
1295 goto done;
1296
1297 if (!is_multicast_ether_addr(eh->h_dest))
1298 goto done;
1299
1300 if (is_mc_allowed_on_bmc(adapter, eh) ||
1301 is_bc_allowed_on_bmc(adapter, eh) ||
1302 is_arp_allowed_on_bmc(adapter, (*skb))) {
1303 os2bmc = true;
1304 goto done;
1305 }
1306
1307 if ((*skb)->protocol == htons(ETH_P_IPV6)) {
1308 struct ipv6hdr *hdr = ipv6_hdr((*skb));
1309 u8 nexthdr = hdr->nexthdr;
1310
1311 if (nexthdr == IPPROTO_ICMPV6) {
1312 struct icmp6hdr *icmp6 = icmp6_hdr((*skb));
1313
1314 switch (icmp6->icmp6_type) {
1315 case NDISC_ROUTER_ADVERTISEMENT:
1316 os2bmc = is_ipv6_ra_filt_enabled(adapter);
1317 goto done;
1318 case NDISC_NEIGHBOUR_ADVERTISEMENT:
1319 os2bmc = is_ipv6_na_filt_enabled(adapter);
1320 goto done;
1321 default:
1322 break;
1323 }
1324 }
1325 }
1326
1327 if (is_udp_pkt((*skb))) {
1328 struct udphdr *udp = udp_hdr((*skb));
1329
Venkat Duvvuru1645d992015-07-10 05:32:47 -04001330 switch (ntohs(udp->dest)) {
Venkata Duvvuru760c295e2015-05-13 13:00:14 +05301331 case DHCP_CLIENT_PORT:
1332 os2bmc = is_dhcp_client_filt_enabled(adapter);
1333 goto done;
1334 case DHCP_SERVER_PORT:
1335 os2bmc = is_dhcp_srvr_filt_enabled(adapter);
1336 goto done;
1337 case NET_BIOS_PORT1:
1338 case NET_BIOS_PORT2:
1339 os2bmc = is_nbios_filt_enabled(adapter);
1340 goto done;
1341 case DHCPV6_RAS_PORT:
1342 os2bmc = is_ipv6_ras_filt_enabled(adapter);
1343 goto done;
1344 default:
1345 break;
1346 }
1347 }
1348done:
1349	/* For vlan-tagged packets destined to the BMC, the asic
1350	 * expects the vlan tag to be inline in the packet.
1351 */
1352 if (os2bmc)
1353 *skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);
1354
1355 return os2bmc;
1356}
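/* Example walk through the checks above, assuming bmc_filt_mask has
 * BMC_FILT_BROADCAST and BMC_FILT_BROADCAST_DHCP_CLIENT set: a
 * broadcast DHCP reply (UDP dest port 68) fails is_bc_allowed_on_bmc()
 * since broadcast filtering is on, but hits the DHCP_CLIENT_PORT case,
 * so os2bmc is true and any vlan tag is inlined before the caller
 * enqueues the 2nd, mgmt-marked copy.
 */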
1357
Sathya Perlaee9c7992013-05-22 23:04:55 +00001358static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
1359{
1360 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001361 u16 q_idx = skb_get_queue_mapping(skb);
1362 struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301363 struct be_wrb_params wrb_params = { 0 };
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301364 bool flush = !skb->xmit_more;
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001365 u16 wrb_cnt;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001366
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301367 skb = be_xmit_workarounds(adapter, skb, &wrb_params);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001368 if (unlikely(!skb))
1369 goto drop;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001370
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301371 be_get_wrb_params_from_skb(adapter, skb, &wrb_params);
1372
1373 wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001374 if (unlikely(!wrb_cnt)) {
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001375 dev_kfree_skb_any(skb);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001376 goto drop;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001377 }
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001378
Venkata Duvvuru760c295e2015-05-13 13:00:14 +05301379	/* If os2bmc is enabled and the pkt is destined to the bmc,
1380	 * enqueue the pkt a 2nd time with the mgmt bit set.
1381 */
1382 if (be_send_pkt_to_bmc(adapter, &skb)) {
1383 BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
1384 wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
1385 if (unlikely(!wrb_cnt))
1386 goto drop;
1387 else
1388 skb_get(skb);
1389 }
1390
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +05301391 if (be_is_txq_full(txo)) {
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001392 netif_stop_subqueue(netdev, q_idx);
1393 tx_stats(txo)->tx_stops++;
1394 }
1395
1396 if (flush || __netif_subqueue_stopped(netdev, q_idx))
1397 be_xmit_flush(adapter, txo);
1398
1399 return NETDEV_TX_OK;
1400drop:
1401 tx_stats(txo)->tx_drv_drops++;
1402 /* Flush the already enqueued tx requests */
1403 if (flush && txo->pend_wrb_cnt)
1404 be_xmit_flush(adapter, txo);
1405
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001406 return NETDEV_TX_OK;
1407}
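/* Doorbell batching in be_xmit() above: WRBs become visible to HW only
 * via be_xmit_flush(). With skb->xmit_more set, the stack promises more
 * skbs, so the notify is deferred and several requests share a single
 * doorbell; the last skb of a burst, or a stopped subqueue, forces the
 * flush.
 */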
1408
1409static int be_change_mtu(struct net_device *netdev, int new_mtu)
1410{
1411 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301412 struct device *dev = &adapter->pdev->dev;
1413
1414 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1415 dev_info(dev, "MTU must be between %d and %d bytes\n",
1416 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001417 return -EINVAL;
1418 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301419
1420 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301421 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001422 netdev->mtu = new_mtu;
1423 return 0;
1424}
1425
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001426static inline bool be_in_all_promisc(struct be_adapter *adapter)
1427{
1428 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1429 BE_IF_FLAGS_ALL_PROMISCUOUS;
1430}
1431
1432static int be_set_vlan_promisc(struct be_adapter *adapter)
1433{
1434 struct device *dev = &adapter->pdev->dev;
1435 int status;
1436
1437 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1438 return 0;
1439
1440 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1441 if (!status) {
1442 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1443 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1444 } else {
1445 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1446 }
1447 return status;
1448}
1449
1450static int be_clear_vlan_promisc(struct be_adapter *adapter)
1451{
1452 struct device *dev = &adapter->pdev->dev;
1453 int status;
1454
1455 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1456 if (!status) {
1457 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1458 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1459 }
1460 return status;
1461}
1462
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001463/*
Ajit Khaparde82903e42010-02-09 01:34:57 +00001464 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1465 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001466 */
Sathya Perla10329df2012-06-05 19:37:18 +00001467static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001468{
Vasundhara Volam50762662014-09-12 17:39:14 +05301469 struct device *dev = &adapter->pdev->dev;
Sathya Perla10329df2012-06-05 19:37:18 +00001470 u16 vids[BE_NUM_VLANS_SUPPORTED];
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301471 u16 num = 0, i = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +00001472 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001473
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001474	/* No need to change the VLAN state if the I/F is in promiscuous mode */
1475 if (adapter->netdev->flags & IFF_PROMISC)
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001476 return 0;
1477
Sathya Perla92bf14a2013-08-27 16:57:32 +05301478 if (adapter->vlans_added > be_max_vlans(adapter))
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001479 return be_set_vlan_promisc(adapter);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001480
Somnath Kotur841f60f2016-07-27 05:26:15 -04001481 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
1482 status = be_clear_vlan_promisc(adapter);
1483 if (status)
1484 return status;
1485 }
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001486 /* Construct VLAN Table to give to HW */
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301487 for_each_set_bit(i, adapter->vids, VLAN_N_VID)
1488 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001489
Vasundhara Volam435452a2015-03-20 06:28:23 -04001490 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001491 if (status) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001492 dev_err(dev, "Setting HW VLAN filtering failed\n");
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001493 /* Set to VLAN promisc mode as setting VLAN filter failed */
Kalesh AP77be8c12015-05-06 05:30:35 -04001494 if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
1495 addl_status(status) ==
Kalesh AP4c600052014-05-30 19:06:26 +05301496 MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001497 return be_set_vlan_promisc(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001498 }
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001499 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001500}
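/* Example of the fallback above: with BE_NUM_VLANS_SUPPORTED == 64
 * filters, configuring a 65th vlan (vlans_added > be_max_vlans()) puts
 * the interface into vlan-promiscuous mode instead of programming the
 * table; the same happens if FW reports an INSUFFICIENT_VLANS or
 * INSUFFICIENT_RESOURCES additional status.
 */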
1501
Patrick McHardy80d5c362013-04-19 02:04:28 +00001502static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001503{
1504 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001505 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001506
Sathya Perlab7172412016-07-27 05:26:18 -04001507 mutex_lock(&adapter->rx_filter_lock);
1508
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001509 /* Packets with VID 0 are always received by Lancer by default */
1510 if (lancer_chip(adapter) && vid == 0)
Sathya Perlab7172412016-07-27 05:26:18 -04001511 goto done;
Vasundhara Volam48291c22014-03-11 18:53:08 +05301512
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301513 if (test_bit(vid, adapter->vids))
Sathya Perlab7172412016-07-27 05:26:18 -04001514 goto done;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001515
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301516 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301517 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001518
Sathya Perlab7172412016-07-27 05:26:18 -04001519 status = be_vid_config(adapter);
1520done:
1521 mutex_unlock(&adapter->rx_filter_lock);
1522 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001523}
1524
Patrick McHardy80d5c362013-04-19 02:04:28 +00001525static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001526{
1527 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perlab7172412016-07-27 05:26:18 -04001528 int status = 0;
1529
1530 mutex_lock(&adapter->rx_filter_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001531
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001532 /* Packets with VID 0 are always received by Lancer by default */
1533 if (lancer_chip(adapter) && vid == 0)
Sathya Perlab7172412016-07-27 05:26:18 -04001534 goto done;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001535
Sriharsha Basavapatna41dcdfb2016-02-03 09:49:18 +05301536 if (!test_bit(vid, adapter->vids))
Sathya Perlab7172412016-07-27 05:26:18 -04001537 goto done;
Sriharsha Basavapatna41dcdfb2016-02-03 09:49:18 +05301538
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301539 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301540 adapter->vlans_added--;
1541
Sathya Perlab7172412016-07-27 05:26:18 -04001542 status = be_vid_config(adapter);
1543done:
1544 mutex_unlock(&adapter->rx_filter_lock);
1545 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001546}
1547
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001548static void be_set_all_promisc(struct be_adapter *adapter)
1549{
1550 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1551 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1552}
1553
1554static void be_set_mc_promisc(struct be_adapter *adapter)
1555{
1556 int status;
1557
1558 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1559 return;
1560
1561 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1562 if (!status)
1563 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1564}
1565
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001566static void be_set_uc_promisc(struct be_adapter *adapter)
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001567{
1568 int status;
1569
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001570 if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS)
1571 return;
1572
1573 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001574 if (!status)
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001575 adapter->if_flags |= BE_IF_FLAGS_PROMISCUOUS;
1576}
1577
1578static void be_clear_uc_promisc(struct be_adapter *adapter)
1579{
1580 int status;
1581
1582 if (!(adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS))
1583 return;
1584
1585 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, OFF);
1586 if (!status)
1587 adapter->if_flags &= ~BE_IF_FLAGS_PROMISCUOUS;
1588}
1589
1590/* The below 2 functions are the callback args for __dev_mc_sync()/__dev_uc_sync().
1591 * We use a single callback function for both sync and unsync. We don't really
1592 * add/remove addresses through this callback; we use it only to detect changes
1593 * to the uc/mc lists. The entire uc/mc list is programmed in be_set_rx_mode().
1594 */
1595static int be_uc_list_update(struct net_device *netdev,
1596 const unsigned char *addr)
1597{
1598 struct be_adapter *adapter = netdev_priv(netdev);
1599
1600 adapter->update_uc_list = true;
1601 return 0;
1602}
1603
1604static int be_mc_list_update(struct net_device *netdev,
1605 const unsigned char *addr)
1606{
1607 struct be_adapter *adapter = netdev_priv(netdev);
1608
1609 adapter->update_mc_list = true;
1610 return 0;
1611}
1612
1613static void be_set_mc_list(struct be_adapter *adapter)
1614{
1615 struct net_device *netdev = adapter->netdev;
Sathya Perlab7172412016-07-27 05:26:18 -04001616 struct netdev_hw_addr *ha;
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001617 bool mc_promisc = false;
1618 int status;
1619
Sathya Perlab7172412016-07-27 05:26:18 -04001620 netif_addr_lock_bh(netdev);
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001621 __dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update);
1622
1623 if (netdev->flags & IFF_PROMISC) {
1624 adapter->update_mc_list = false;
1625 } else if (netdev->flags & IFF_ALLMULTI ||
1626 netdev_mc_count(netdev) > be_max_mc(adapter)) {
1627 /* Enable multicast promisc if num configured exceeds
1628 * what we support
1629 */
1630 mc_promisc = true;
1631 adapter->update_mc_list = false;
1632 } else if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) {
1633 /* Update mc-list unconditionally if the iface was previously
1634 * in mc-promisc mode and now is out of that mode.
1635 */
1636 adapter->update_mc_list = true;
1637 }
1638
Sathya Perlab7172412016-07-27 05:26:18 -04001639 if (adapter->update_mc_list) {
1640 int i = 0;
1641
1642 /* cache the mc-list in adapter */
1643 netdev_for_each_mc_addr(ha, netdev) {
1644 ether_addr_copy(adapter->mc_list[i].mac, ha->addr);
1645 i++;
1646 }
1647 adapter->mc_count = netdev_mc_count(netdev);
1648 }
1649 netif_addr_unlock_bh(netdev);
1650
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001651 if (mc_promisc) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001652 be_set_mc_promisc(adapter);
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001653 } else if (adapter->update_mc_list) {
1654 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1655 if (!status)
1656 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1657 else
1658 be_set_mc_promisc(adapter);
1659
1660 adapter->update_mc_list = false;
1661 }
1662}
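/* The mc-list is cached under netif_addr_lock_bh() and pushed to FW
 * only after the lock is dropped, since FW cmds may sleep. Example
 * (assuming be_max_mc() returns 64): joining a 65th group flips the
 * iface into mcast-promisc; once the count drops back within the
 * limit, the MCAST_PROMISCUOUS branch above forces a full re-program
 * of the exact list.
 */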
1663
1664static void be_clear_mc_list(struct be_adapter *adapter)
1665{
1666 struct net_device *netdev = adapter->netdev;
1667
1668 __dev_mc_unsync(netdev, NULL);
1669 be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, OFF);
Sathya Perlab7172412016-07-27 05:26:18 -04001670 adapter->mc_count = 0;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001671}
1672
Suresh Reddy988d44b2016-09-07 19:57:52 +05301673static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
1674{
1675 if (ether_addr_equal((u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
Suresh Reddyc27ebf52016-09-07 19:57:53 +05301676 adapter->dev_mac)) {
Suresh Reddy988d44b2016-09-07 19:57:52 +05301677 adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
1678 return 0;
1679 }
1680
1681 return be_cmd_pmac_add(adapter,
1682 (u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
1683 adapter->if_handle,
1684 &adapter->pmac_id[uc_idx + 1], 0);
1685}
1686
1687static void be_uc_mac_del(struct be_adapter *adapter, int pmac_id)
1688{
1689 if (pmac_id == adapter->pmac_id[0])
1690 return;
1691
1692 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
1693}
1694
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001695static void be_set_uc_list(struct be_adapter *adapter)
1696{
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001697 struct net_device *netdev = adapter->netdev;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001698 struct netdev_hw_addr *ha;
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001699 bool uc_promisc = false;
Sathya Perlab7172412016-07-27 05:26:18 -04001700 int curr_uc_macs = 0, i;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001701
Sathya Perlab7172412016-07-27 05:26:18 -04001702 netif_addr_lock_bh(netdev);
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001703 __dev_uc_sync(netdev, be_uc_list_update, be_uc_list_update);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001704
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001705 if (netdev->flags & IFF_PROMISC) {
1706 adapter->update_uc_list = false;
1707 } else if (netdev_uc_count(netdev) > (be_max_uc(adapter) - 1)) {
1708 uc_promisc = true;
1709 adapter->update_uc_list = false;
1710 } else if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) {
1711 /* Update uc-list unconditionally if the iface was previously
1712 * in uc-promisc mode and now is out of that mode.
1713 */
1714 adapter->update_uc_list = true;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001715 }
1716
Sathya Perlab7172412016-07-27 05:26:18 -04001717 if (adapter->update_uc_list) {
1718 i = 1; /* First slot is claimed by the Primary MAC */
1719
1720 /* cache the uc-list in adapter array */
1721 netdev_for_each_uc_addr(ha, netdev) {
1722 ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
1723 i++;
1724 }
1725 curr_uc_macs = netdev_uc_count(netdev);
1726 }
1727 netif_addr_unlock_bh(netdev);
1728
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001729 if (uc_promisc) {
1730 be_set_uc_promisc(adapter);
1731 } else if (adapter->update_uc_list) {
1732 be_clear_uc_promisc(adapter);
1733
Sathya Perlab7172412016-07-27 05:26:18 -04001734 for (i = 0; i < adapter->uc_macs; i++)
Suresh Reddy988d44b2016-09-07 19:57:52 +05301735 be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001736
Sathya Perlab7172412016-07-27 05:26:18 -04001737 for (i = 0; i < curr_uc_macs; i++)
Suresh Reddy988d44b2016-09-07 19:57:52 +05301738 be_uc_mac_add(adapter, i);
Sathya Perlab7172412016-07-27 05:26:18 -04001739 adapter->uc_macs = curr_uc_macs;
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001740 adapter->update_uc_list = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001741 }
1742}
1743
1744static void be_clear_uc_list(struct be_adapter *adapter)
1745{
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001746 struct net_device *netdev = adapter->netdev;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001747 int i;
1748
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001749 __dev_uc_unsync(netdev, NULL);
Sathya Perlab7172412016-07-27 05:26:18 -04001750 for (i = 0; i < adapter->uc_macs; i++)
Suresh Reddy988d44b2016-09-07 19:57:52 +05301751 be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
1752
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001753 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301754}
1755
Sathya Perlab7172412016-07-27 05:26:18 -04001756static void __be_set_rx_mode(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001757{
Sathya Perlab7172412016-07-27 05:26:18 -04001758 struct net_device *netdev = adapter->netdev;
1759
1760 mutex_lock(&adapter->rx_filter_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001761
1762 if (netdev->flags & IFF_PROMISC) {
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001763 if (!be_in_all_promisc(adapter))
1764 be_set_all_promisc(adapter);
1765 } else if (be_in_all_promisc(adapter)) {
1766 /* We need to re-program the vlan-list or clear
1767 * vlan-promisc mode (if needed) when the interface
1768 * comes out of promisc mode.
1769 */
1770 be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001771 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001772
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001773 be_set_uc_list(adapter);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001774 be_set_mc_list(adapter);
Sathya Perlab7172412016-07-27 05:26:18 -04001775
1776 mutex_unlock(&adapter->rx_filter_lock);
1777}
1778
1779static void be_work_set_rx_mode(struct work_struct *work)
1780{
1781 struct be_cmd_work *cmd_work =
1782 container_of(work, struct be_cmd_work, work);
1783
1784 __be_set_rx_mode(cmd_work->adapter);
1785 kfree(cmd_work);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001786}
1787
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001788static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1789{
1790 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001791 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001792 int status;
1793
Sathya Perla11ac75e2011-12-13 00:58:50 +00001794 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001795 return -EPERM;
1796
Sathya Perla11ac75e2011-12-13 00:58:50 +00001797 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001798 return -EINVAL;
1799
Vasundhara Volam3c31aaf2014-08-01 17:47:31 +05301800 /* Proceed further only if user provided MAC is different
1801 * from active MAC
1802 */
1803 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1804 return 0;
1805
Sathya Perla3175d8c2013-07-23 15:25:03 +05301806 if (BEx_chip(adapter)) {
1807 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1808 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001809
Sathya Perla11ac75e2011-12-13 00:58:50 +00001810 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1811 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301812 } else {
1813 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1814 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001815 }
1816
Kalesh APabccf232014-07-17 16:20:24 +05301817 if (status) {
1818 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1819 mac, vf, status);
1820 return be_cmd_status(status);
1821 }
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001822
Kalesh APabccf232014-07-17 16:20:24 +05301823 ether_addr_copy(vf_cfg->mac_addr, mac);
1824
1825 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001826}
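/* As the branches above show, BEx manages per-function pmac entries,
 * so a VF MAC change is a pmac del + add; Skyhawk/Lancer instead
 * overwrite the MAC in place with be_cmd_set_mac().
 */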
1827
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001828static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301829 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001830{
1831 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001832 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001833
Sathya Perla11ac75e2011-12-13 00:58:50 +00001834 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001835 return -EPERM;
1836
Sathya Perla11ac75e2011-12-13 00:58:50 +00001837 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001838 return -EINVAL;
1839
1840 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001841 vi->max_tx_rate = vf_cfg->tx_rate;
1842 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001843 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1844 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001845 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301846 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Kalesh APe7bcbd72015-05-06 05:30:32 -04001847 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001848
1849 return 0;
1850}
1851
Vasundhara Volam435452a2015-03-20 06:28:23 -04001852static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
1853{
1854 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1855 u16 vids[BE_NUM_VLANS_SUPPORTED];
1856 int vf_if_id = vf_cfg->if_handle;
1857 int status;
1858
1859 /* Enable Transparent VLAN Tagging */
Kalesh APe7bcbd72015-05-06 05:30:32 -04001860 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
Vasundhara Volam435452a2015-03-20 06:28:23 -04001861 if (status)
1862 return status;
1863
1864	/* If TVT is enabled, clear any pre-programmed VLAN filters on the VF */
1865 vids[0] = 0;
1866 status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
1867 if (!status)
1868 dev_info(&adapter->pdev->dev,
1869 "Cleared guest VLANs on VF%d", vf);
1870
1871 /* After TVT is enabled, disallow VFs to program VLAN filters */
1872 if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
1873 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
1874 ~BE_PRIV_FILTMGMT, vf + 1);
1875 if (!status)
1876 vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
1877 }
1878 return 0;
1879}
1880
1881static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
1882{
1883 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1884 struct device *dev = &adapter->pdev->dev;
1885 int status;
1886
1887 /* Reset Transparent VLAN Tagging. */
1888 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
Kalesh APe7bcbd72015-05-06 05:30:32 -04001889 vf_cfg->if_handle, 0, 0);
Vasundhara Volam435452a2015-03-20 06:28:23 -04001890 if (status)
1891 return status;
1892
1893 /* Allow VFs to program VLAN filtering */
1894 if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
1895 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
1896 BE_PRIV_FILTMGMT, vf + 1);
1897 if (!status) {
1898 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
1899 dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
1900 }
1901 }
1902
1903 dev_info(dev,
1904 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
1905 return 0;
1906}
1907
Moshe Shemesh79aab092016-09-22 12:11:15 +03001908static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
1909 __be16 vlan_proto)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001910{
1911 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001912 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Vasundhara Volam435452a2015-03-20 06:28:23 -04001913 int status;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001914
Sathya Perla11ac75e2011-12-13 00:58:50 +00001915 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001916 return -EPERM;
1917
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001918 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001919 return -EINVAL;
1920
Moshe Shemesh79aab092016-09-22 12:11:15 +03001921 if (vlan_proto != htons(ETH_P_8021Q))
1922 return -EPROTONOSUPPORT;
1923
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001924 if (vlan || qos) {
1925 vlan |= qos << VLAN_PRIO_SHIFT;
Vasundhara Volam435452a2015-03-20 06:28:23 -04001926 status = be_set_vf_tvt(adapter, vf, vlan);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001927 } else {
Vasundhara Volam435452a2015-03-20 06:28:23 -04001928 status = be_clear_vf_tvt(adapter, vf);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001929 }
1930
Kalesh APabccf232014-07-17 16:20:24 +05301931 if (status) {
1932 dev_err(&adapter->pdev->dev,
Vasundhara Volam435452a2015-03-20 06:28:23 -04001933 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1934 status);
Kalesh APabccf232014-07-17 16:20:24 +05301935 return be_cmd_status(status);
1936 }
1937
1938 vf_cfg->vlan_tag = vlan;
Kalesh APabccf232014-07-17 16:20:24 +05301939 return 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001940}
1941
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001942static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
1943 int min_tx_rate, int max_tx_rate)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001944{
1945 struct be_adapter *adapter = netdev_priv(netdev);
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301946 struct device *dev = &adapter->pdev->dev;
1947 int percent_rate, status = 0;
1948 u16 link_speed = 0;
1949 u8 link_status;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001950
Sathya Perla11ac75e2011-12-13 00:58:50 +00001951 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001952 return -EPERM;
1953
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001954 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001955 return -EINVAL;
1956
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001957 if (min_tx_rate)
1958 return -EINVAL;
1959
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301960 if (!max_tx_rate)
1961 goto config_qos;
1962
1963 status = be_cmd_link_status_query(adapter, &link_speed,
1964 &link_status, 0);
1965 if (status)
1966 goto err;
1967
1968 if (!link_status) {
1969 dev_err(dev, "TX-rate setting not allowed when link is down\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05301970 status = -ENETDOWN;
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301971 goto err;
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001972 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001973
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301974 if (max_tx_rate < 100 || max_tx_rate > link_speed) {
1975 dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
1976 link_speed);
1977 status = -EINVAL;
1978 goto err;
1979 }
1980
1981 /* On Skyhawk the QOS setting must be done only as a % value */
1982 percent_rate = link_speed / 100;
1983 if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
1984 dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
1985 percent_rate);
1986 status = -EINVAL;
1987 goto err;
1988 }
1989
1990config_qos:
1991 status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001992 if (status)
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301993 goto err;
1994
1995 adapter->vf_cfg[vf].tx_rate = max_tx_rate;
1996 return 0;
1997
1998err:
1999 dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
2000 max_tx_rate, vf);
Kalesh APabccf232014-07-17 16:20:24 +05302001 return be_cmd_status(status);
Ajit Khapardee1d18732010-07-23 01:52:13 +00002002}
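/* Worked example of the Skyhawk %-rate check above: with link_speed ==
 * 10000 Mbps, percent_rate == 100, so max_tx_rate == 2550 is rejected
 * (-EINVAL) while 2500 passes and is programmed via be_cmd_config_qos(),
 * i.e. 25% of line rate.
 */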
Kalesh APe2fb1af2014-09-19 15:46:58 +05302003
Suresh Reddybdce2ad2014-03-11 18:53:04 +05302004static int be_set_vf_link_state(struct net_device *netdev, int vf,
2005 int link_state)
2006{
2007 struct be_adapter *adapter = netdev_priv(netdev);
2008 int status;
2009
2010 if (!sriov_enabled(adapter))
2011 return -EPERM;
2012
2013 if (vf >= adapter->num_vfs)
2014 return -EINVAL;
2015
2016 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05302017 if (status) {
2018 dev_err(&adapter->pdev->dev,
2019 "Link state change on VF %d failed: %#x\n", vf, status);
2020 return be_cmd_status(status);
2021 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05302022
Kalesh APabccf232014-07-17 16:20:24 +05302023 adapter->vf_cfg[vf].plink_tracking = link_state;
2024
2025 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05302026}
Ajit Khapardee1d18732010-07-23 01:52:13 +00002027
Kalesh APe7bcbd72015-05-06 05:30:32 -04002028static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
2029{
2030 struct be_adapter *adapter = netdev_priv(netdev);
2031 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
2032 u8 spoofchk;
2033 int status;
2034
2035 if (!sriov_enabled(adapter))
2036 return -EPERM;
2037
2038 if (vf >= adapter->num_vfs)
2039 return -EINVAL;
2040
2041 if (BEx_chip(adapter))
2042 return -EOPNOTSUPP;
2043
2044 if (enable == vf_cfg->spoofchk)
2045 return 0;
2046
2047 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
2048
2049 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
2050 0, spoofchk);
2051 if (status) {
2052 dev_err(&adapter->pdev->dev,
2053 "Spoofchk change on VF %d failed: %#x\n", vf, status);
2054 return be_cmd_status(status);
2055 }
2056
2057 vf_cfg->spoofchk = enable;
2058 return 0;
2059}
2060
Sathya Perla2632baf2013-10-01 16:00:00 +05302061static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
2062 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002063{
Sathya Perla2632baf2013-10-01 16:00:00 +05302064 aic->rx_pkts_prev = rx_pkts;
2065 aic->tx_reqs_prev = tx_pkts;
2066 aic->jiffies = now;
2067}
Sathya Perlaac124ff2011-07-25 19:10:14 +00002068
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002069static int be_get_new_eqd(struct be_eq_obj *eqo)
Sathya Perla2632baf2013-10-01 16:00:00 +05302070{
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002071 struct be_adapter *adapter = eqo->adapter;
2072 int eqd, start;
Sathya Perla2632baf2013-10-01 16:00:00 +05302073 struct be_aic_obj *aic;
Sathya Perla2632baf2013-10-01 16:00:00 +05302074 struct be_rx_obj *rxo;
2075 struct be_tx_obj *txo;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002076 u64 rx_pkts = 0, tx_pkts = 0;
Sathya Perla2632baf2013-10-01 16:00:00 +05302077 ulong now;
2078 u32 pps, delta;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002079 int i;
2080
2081 aic = &adapter->aic_obj[eqo->idx];
2082 if (!aic->enable) {
2083 if (aic->jiffies)
2084 aic->jiffies = 0;
2085 eqd = aic->et_eqd;
2086 return eqd;
2087 }
2088
2089 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2090 do {
2091 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
2092 rx_pkts += rxo->stats.rx_pkts;
2093 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
2094 }
2095
2096 for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
2097 do {
2098 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
2099 tx_pkts += txo->stats.tx_reqs;
2100 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
2101 }
2102
2103	/* Skip if the counters wrapped around or this is the first calculation */
2104 now = jiffies;
2105 if (!aic->jiffies || time_before(now, aic->jiffies) ||
2106 rx_pkts < aic->rx_pkts_prev ||
2107 tx_pkts < aic->tx_reqs_prev) {
2108 be_aic_update(aic, rx_pkts, tx_pkts, now);
2109 return aic->prev_eqd;
2110 }
2111
2112 delta = jiffies_to_msecs(now - aic->jiffies);
2113 if (delta == 0)
2114 return aic->prev_eqd;
2115
2116 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
2117 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
2118 eqd = (pps / 15000) << 2;
2119
2120 if (eqd < 8)
2121 eqd = 0;
2122 eqd = min_t(u32, eqd, aic->max_eqd);
2123 eqd = max_t(u32, eqd, aic->min_eqd);
2124
2125 be_aic_update(aic, rx_pkts, tx_pkts, now);
2126
2127 return eqd;
2128}
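/* Worked example of the eqd math above (illustrative numbers): with
 * delta == 1000 msecs, 120K rx pkts and 30K tx reqs since the last
 * sample, pps == 150000 and eqd == (150000 / 15000) << 2 == 40, which
 * is then clamped to [aic->min_eqd, aic->max_eqd]; an eqd under 8 is
 * zeroed before the clamp.
 */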
2129
2130/* For Skyhawk-R only */
2131static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
2132{
2133 struct be_adapter *adapter = eqo->adapter;
2134 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
2135 ulong now = jiffies;
2136 int eqd;
2137 u32 mult_enc;
2138
2139 if (!aic->enable)
2140 return 0;
2141
Padmanabh Ratnakar3c0d49a2016-02-03 09:49:23 +05302142 if (jiffies_to_msecs(now - aic->jiffies) < 1)
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002143 eqd = aic->prev_eqd;
2144 else
2145 eqd = be_get_new_eqd(eqo);
2146
2147 if (eqd > 100)
2148 mult_enc = R2I_DLY_ENC_1;
2149 else if (eqd > 60)
2150 mult_enc = R2I_DLY_ENC_2;
2151 else if (eqd > 20)
2152 mult_enc = R2I_DLY_ENC_3;
2153 else
2154 mult_enc = R2I_DLY_ENC_0;
2155
2156 aic->prev_eqd = eqd;
2157
2158 return mult_enc;
2159}
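/* Encoding bands produced by the branches above:
 *   eqd > 100        -> R2I_DLY_ENC_1
 *   60 < eqd <= 100  -> R2I_DLY_ENC_2
 *   20 < eqd <= 60   -> R2I_DLY_ENC_3
 *   eqd <= 20        -> R2I_DLY_ENC_0
 */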
2160
2161void be_eqd_update(struct be_adapter *adapter, bool force_update)
2162{
2163 struct be_set_eqd set_eqd[MAX_EVT_QS];
2164 struct be_aic_obj *aic;
2165 struct be_eq_obj *eqo;
2166 int i, num = 0, eqd;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002167
Sathya Perla2632baf2013-10-01 16:00:00 +05302168 for_all_evt_queues(adapter, eqo, i) {
2169 aic = &adapter->aic_obj[eqo->idx];
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002170 eqd = be_get_new_eqd(eqo);
2171 if (force_update || eqd != aic->prev_eqd) {
Sathya Perla2632baf2013-10-01 16:00:00 +05302172 set_eqd[num].delay_multiplier = (eqd * 65)/100;
2173 set_eqd[num].eq_id = eqo->q.id;
2174 aic->prev_eqd = eqd;
2175 num++;
2176 }
Sathya Perlaac124ff2011-07-25 19:10:14 +00002177 }
Sathya Perla2632baf2013-10-01 16:00:00 +05302178
2179 if (num)
2180 be_cmd_modify_eqd(adapter, set_eqd, num);
Sathya Perla4097f662009-03-24 16:40:13 -07002181}
2182
Sathya Perla3abcded2010-10-03 22:12:27 -07002183static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05302184 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07002185{
Sathya Perlaac124ff2011-07-25 19:10:14 +00002186 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07002187
Sathya Perlaab1594e2011-07-25 19:10:15 +00002188 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07002189 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00002190 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07002191 stats->rx_pkts++;
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +05302192 if (rxcp->tunneled)
2193 stats->rx_vxlan_offload_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00002194 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07002195 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00002196 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00002197 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00002198 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002199}
2200
Sathya Perla2e588f82011-03-11 02:49:26 +00002201static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07002202{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00002203 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05302204 * Also ignore ipcksm for ipv6 pkts
2205 */
Sathya Perla2e588f82011-03-11 02:49:26 +00002206 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05302207 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07002208}
2209
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302210static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002211{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002212 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002213 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07002214 struct be_queue_info *rxq = &rxo->q;
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +05302215 u32 frag_idx = rxq->tail;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002216
Sathya Perla3abcded2010-10-03 22:12:27 -07002217 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002218 BUG_ON(!rx_page_info->page);
2219
Sathya Perlae50287b2014-03-04 12:14:38 +05302220 if (rx_page_info->last_frag) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002221 dma_unmap_page(&adapter->pdev->dev,
2222 dma_unmap_addr(rx_page_info, bus),
2223 adapter->big_page_size, DMA_FROM_DEVICE);
Sathya Perlae50287b2014-03-04 12:14:38 +05302224 rx_page_info->last_frag = false;
2225 } else {
2226 dma_sync_single_for_cpu(&adapter->pdev->dev,
2227 dma_unmap_addr(rx_page_info, bus),
2228 rx_frag_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00002229 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002230
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302231 queue_tail_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002232 atomic_dec(&rxq->used);
2233 return rx_page_info;
2234}
2235
2236/* Throw away the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002237static void be_rx_compl_discard(struct be_rx_obj *rxo,
2238 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002239{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002240 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00002241 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002242
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002243 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302244 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002245 put_page(page_info->page);
2246 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002247 }
2248}
2249
2250/*
2251 * skb_fill_rx_data forms a complete skb for an ether frame
2252 * indicated by rxcp.
2253 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002254static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
2255 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002256{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002257 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00002258 u16 i, j;
2259 u16 hdr_len, curr_frag_len, remaining;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002260 u8 *start;
2261
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302262 page_info = get_rx_page_info(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002263 start = page_address(page_info->page) + page_info->page_offset;
2264 prefetch(start);
2265
2266 /* Copy data in the first descriptor of this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00002267 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002268
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002269 skb->len = curr_frag_len;
2270 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00002271 memcpy(skb->data, start, curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002272 /* Complete packet has now been moved to data */
2273 put_page(page_info->page);
2274 skb->data_len = 0;
2275 skb->tail += curr_frag_len;
2276 } else {
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00002277 hdr_len = ETH_HLEN;
2278 memcpy(skb->data, start, hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002279 skb_shinfo(skb)->nr_frags = 1;
Ian Campbellb061b392011-08-29 23:18:23 +00002280 skb_frag_set_page(skb, 0, page_info->page);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002281 skb_shinfo(skb)->frags[0].page_offset =
2282 page_info->page_offset + hdr_len;
Sathya Perla748b5392014-05-09 13:29:13 +05302283 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
2284 curr_frag_len - hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002285 skb->data_len = curr_frag_len - hdr_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00002286 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002287 skb->tail += hdr_len;
2288 }
Ajit Khaparde205859a2010-02-09 01:34:21 +00002289 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002290
Sathya Perla2e588f82011-03-11 02:49:26 +00002291 if (rxcp->pkt_size <= rx_frag_size) {
2292 BUG_ON(rxcp->num_rcvd != 1);
2293 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002294 }
2295
2296 /* More frags present for this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00002297 remaining = rxcp->pkt_size - curr_frag_len;
2298 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302299 page_info = get_rx_page_info(rxo);
Sathya Perla2e588f82011-03-11 02:49:26 +00002300 curr_frag_len = min(remaining, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002301
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002302 /* Coalesce all frags from the same physical page in one slot */
2303 if (page_info->page_offset == 0) {
2304 /* Fresh page */
2305 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00002306 skb_frag_set_page(skb, j, page_info->page);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002307 skb_shinfo(skb)->frags[j].page_offset =
2308 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00002309 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002310 skb_shinfo(skb)->nr_frags++;
2311 } else {
2312 put_page(page_info->page);
2313 }
2314
Eric Dumazet9e903e02011-10-18 21:00:24 +00002315 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002316 skb->len += curr_frag_len;
2317 skb->data_len += curr_frag_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00002318 skb->truesize += rx_frag_size;
Sathya Perla2e588f82011-03-11 02:49:26 +00002319 remaining -= curr_frag_len;
Ajit Khaparde205859a2010-02-09 01:34:21 +00002320 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002321 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002322 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002323}
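/* Example of the frag accounting above, assuming rx_frag_size == 2048:
 * a 5000-byte frame arrives as 3 fragments (2048 + 2048 + 904). The
 * first supplies the copied ETH_HLEN bytes plus frag[0]; the rest land
 * in later frag slots, except that fragments sharing a physical page
 * (page_offset != 0) are coalesced into the same slot.
 */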
2324
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002325/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla6384a4d2013-10-25 10:40:16 +05302326static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002327 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002328{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002329 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00002330 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002331 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00002332
Eric Dumazetbb349bb2012-01-25 03:56:30 +00002333 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00002334 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00002335 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002336 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002337 return;
2338 }
2339
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002340 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002341
Michał Mirosław6332c8d2011-04-07 02:43:48 +00002342 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07002343 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00002344 else
2345 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002346
Michał Mirosław6332c8d2011-04-07 02:43:48 +00002347 skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00002348 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002349 if (netdev->features & NETIF_F_RXHASH)
Tom Herbertd2464c8c2013-12-17 23:23:51 -08002350 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
Sathya Perlac9c47142014-03-27 10:46:19 +05302351
Tom Herbertb6c0e892014-08-27 21:27:17 -07002352 skb->csum_level = rxcp->tunneled;
Sathya Perla6384a4d2013-10-25 10:40:16 +05302353 skb_mark_napi_id(skb, napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002354
Jiri Pirko343e43c2011-08-25 02:50:51 +00002355 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00002356 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07002357
2358 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002359}
2360
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002361/* Process the RX completion indicated by rxcp when GRO is enabled */
Jingoo Han4188e7d2013-08-05 18:02:02 +09002362static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
2363 struct napi_struct *napi,
2364 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002365{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002366 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002367 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002368 struct sk_buff *skb = NULL;
Sathya Perla2e588f82011-03-11 02:49:26 +00002369 u16 remaining, curr_frag_len;
2370 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00002371
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002372 skb = napi_get_frags(napi);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002373 if (!skb) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002374 be_rx_compl_discard(rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002375 return;
2376 }
2377
Sathya Perla2e588f82011-03-11 02:49:26 +00002378 remaining = rxcp->pkt_size;
2379 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302380 page_info = get_rx_page_info(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002381
2382 curr_frag_len = min(remaining, rx_frag_size);
2383
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002384 /* Coalesce all frags from the same physical page in one slot */
2385 if (i == 0 || page_info->page_offset == 0) {
2386 /* First frag or Fresh page */
2387 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00002388 skb_frag_set_page(skb, j, page_info->page);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002389 skb_shinfo(skb)->frags[j].page_offset =
2390 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00002391 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002392 } else {
2393 put_page(page_info->page);
2394 }
Eric Dumazet9e903e02011-10-18 21:00:24 +00002395 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Eric Dumazetbdb28a92011-10-13 06:31:02 +00002396 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002397 remaining -= curr_frag_len;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002398 memset(page_info, 0, sizeof(*page_info));
2399 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002400 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002401
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002402 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00002403 skb->len = rxcp->pkt_size;
2404 skb->data_len = rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002405 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturaaa6dae2012-05-02 03:40:49 +00002406 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Ajit Khaparde4b972912011-04-06 18:07:43 +00002407 if (adapter->netdev->features & NETIF_F_RXHASH)
Tom Herbertd2464c8c2013-12-17 23:23:51 -08002408 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
Sathya Perlac9c47142014-03-27 10:46:19 +05302409
Tom Herbertb6c0e892014-08-27 21:27:17 -07002410 skb->csum_level = rxcp->tunneled;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002411
Jiri Pirko343e43c2011-08-25 02:50:51 +00002412 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00002413 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07002414
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002415 napi_gro_frags(napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002416}
2417
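/*
 * Editor's sketch (illustrative only, not part of the driver): the GRO
 * path above coalesces successive RX fragments that share a physical
 * page into a single skb frag slot -- 'j' advances only when a fresh
 * page starts. A minimal model of that accounting; the demo_* names
 * are hypothetical.
 */
struct demo_frag {
	void *page;	/* page backing this frag slot */
	u32 size;	/* bytes accumulated in this slot */
};

/* Returns the number of frag slots used for 'nfrags' page chunks */
static int demo_coalesce_frags(struct demo_frag *slots, void **pages,
			       u32 *offsets, u32 *lens, int nfrags)
{
	int i, j = -1;

	for (i = 0; i < nfrags; i++) {
		/* The first chunk, or any chunk starting at offset 0,
		 * opens a new slot; later chunks of the same page only
		 * grow the current one.
		 */
		if (i == 0 || offsets[i] == 0) {
			j++;
			slots[j].page = pages[i];
			slots[j].size = 0;
		}
		slots[j].size += lens[i];
	}
	return j + 1;
}
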
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002418static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2419 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002420{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302421 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2422 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2423 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2424 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2425 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2426 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2427 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2428 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2429 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2430 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2431 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002432 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302433 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2434 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002435 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302436 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
Sathya Perlac9c47142014-03-27 10:46:19 +05302437 rxcp->tunneled =
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302438 GET_RX_COMPL_V1_BITS(tunneled, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00002439}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002440
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002441static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2442 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00002443{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302444 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2445 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2446 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2447 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2448 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2449 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2450 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2451 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2452 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2453 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2454 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002455 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302456 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2457 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002458 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302459 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2460 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00002461}
2462
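/*
 * Editor's sketch (illustrative only): the GET_RX_COMPL_Vx_BITS() macros
 * used above resolve, through the amap_eth_rx_compl_* layouts, to a
 * shift-and-mask over the completion's dword array (performed after
 * be_dws_le_to_cpu()). A generic model of such an extractor, assuming
 * the field does not straddle a dword boundary; demo_get_bits is
 * hypothetical.
 */
static u32 demo_get_bits(const u32 *dw, int bit_offset, int width)
{
	u32 word = dw[bit_offset / 32];
	int shift = bit_offset % 32;
	u32 mask = width < 32 ? (1u << width) - 1 : 0xffffffffu;

	return (word >> shift) & mask;
}
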
2463static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
2464{
2465 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
2466 struct be_rx_compl_info *rxcp = &rxo->rxcp;
2467 struct be_adapter *adapter = rxo->adapter;
2468
 2469	/* For checking the valid bit it is OK to use either definition, as the
 2470	 * valid bit is at the same position in both v0 and v1 Rx compl */
2471 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002472 return NULL;
2473
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00002474 rmb();
Sathya Perla2e588f82011-03-11 02:49:26 +00002475 be_dws_le_to_cpu(compl, sizeof(*compl));
2476
2477 if (adapter->be3_native)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002478 be_parse_rx_compl_v1(compl, rxcp);
Sathya Perla2e588f82011-03-11 02:49:26 +00002479 else
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002480 be_parse_rx_compl_v0(compl, rxcp);
Sathya Perla2e588f82011-03-11 02:49:26 +00002481
Somnath Koture38b1702013-05-29 22:55:56 +00002482 if (rxcp->ip_frag)
2483 rxcp->l4_csum = 0;
2484
Sathya Perla15d72182011-03-21 20:49:26 +00002485 if (rxcp->vlanf) {
Vasundhara Volamf93f1602014-02-12 16:09:25 +05302486 /* In QNQ modes, if qnq bit is not set, then the packet was
2487 * tagged only with the transparent outer vlan-tag and must
 2488	 * not be treated as a vlan packet by the host
2489 */
2490 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
Sathya Perla15d72182011-03-21 20:49:26 +00002491 rxcp->vlanf = 0;
Sathya Perla2e588f82011-03-11 02:49:26 +00002492
Sathya Perla15d72182011-03-21 20:49:26 +00002493 if (!lancer_chip(adapter))
David S. Miller3c709f82011-05-11 14:26:15 -04002494 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
Sathya Perla2e588f82011-03-11 02:49:26 +00002495
Somnath Kotur939cf302011-08-18 21:51:49 -07002496 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05302497 !test_bit(rxcp->vlan_tag, adapter->vids))
Sathya Perla15d72182011-03-21 20:49:26 +00002498 rxcp->vlanf = 0;
2499 }
Sathya Perla2e588f82011-03-11 02:49:26 +00002500
 2501	/* As the compl has been parsed, reset it; we won't touch it again */
2502 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002503
Sathya Perla3abcded2010-10-03 22:12:27 -07002504 queue_tail_inc(&rxo->cq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002505 return rxcp;
2506}
2507
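/*
 * Editor's sketch (illustrative only): be_rx_compl_get() above (and
 * be_tx_compl_get() below) follow the same single-consumer protocol for
 * a DMA'd completion ring: peek the valid bit, issue a read barrier
 * before trusting the remaining dwords, clear the valid bit so the slot
 * is not processed twice, then advance the tail. The demo_* names and
 * the valid-bit position are assumptions.
 */
struct demo_cqe {
	__le32 dw[4];		/* dw[3] bit 31 assumed to be 'valid' */
};

static struct demo_cqe *demo_cq_next(struct demo_cqe *ring, u16 *tail,
				     u16 qlen)
{
	struct demo_cqe *cqe = &ring[*tail];

	if (!(le32_to_cpu(cqe->dw[3]) & BIT(31)))
		return NULL;		/* HW has not produced this entry */

	rmb();				/* valid bit before payload dwords */
	cqe->dw[3] &= ~cpu_to_le32(BIT(31));
	*tail = (*tail + 1) % qlen;	/* ring wrap */
	return cqe;
}
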
Eric Dumazet1829b082011-03-01 05:48:12 +00002508static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002509{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002510 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00002511
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002512 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00002513 gfp |= __GFP_COMP;
2514 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002515}
2516
2517/*
 2518 * Allocate a page, split it into fragments of size rx_frag_size and post as
2519 * receive buffers to BE
2520 */
Ajit Khapardec30d7262014-09-12 17:39:16 +05302521static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002522{
Sathya Perla3abcded2010-10-03 22:12:27 -07002523 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08002524 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07002525 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002526 struct page *pagep = NULL;
Ivan Veceraba42fad2014-01-15 11:11:34 +01002527 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002528 struct be_eth_rx_d *rxd;
2529 u64 page_dmaaddr = 0, frag_dmaaddr;
Ajit Khapardec30d7262014-09-12 17:39:16 +05302530 u32 posted, page_offset = 0, notify = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002531
Sathya Perla3abcded2010-10-03 22:12:27 -07002532 page_info = &rxo->page_info_tbl[rxq->head];
Ajit Khapardec30d7262014-09-12 17:39:16 +05302533 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002534 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00002535 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002536 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00002537 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002538 break;
2539 }
Ivan Veceraba42fad2014-01-15 11:11:34 +01002540 page_dmaaddr = dma_map_page(dev, pagep, 0,
2541 adapter->big_page_size,
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002542 DMA_FROM_DEVICE);
Ivan Veceraba42fad2014-01-15 11:11:34 +01002543 if (dma_mapping_error(dev, page_dmaaddr)) {
2544 put_page(pagep);
2545 pagep = NULL;
Vasundhara Volamd3de1542014-09-02 09:56:50 +05302546 adapter->drv_stats.dma_map_errors++;
Ivan Veceraba42fad2014-01-15 11:11:34 +01002547 break;
2548 }
Sathya Perlae50287b2014-03-04 12:14:38 +05302549 page_offset = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002550 } else {
2551 get_page(pagep);
Sathya Perlae50287b2014-03-04 12:14:38 +05302552 page_offset += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002553 }
Sathya Perlae50287b2014-03-04 12:14:38 +05302554 page_info->page_offset = page_offset;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002555 page_info->page = pagep;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002556
2557 rxd = queue_head_node(rxq);
Sathya Perlae50287b2014-03-04 12:14:38 +05302558 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002559 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2560 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002561
2562 /* Any space left in the current big page for another frag? */
2563 if ((page_offset + rx_frag_size + rx_frag_size) >
2564 adapter->big_page_size) {
2565 pagep = NULL;
Sathya Perlae50287b2014-03-04 12:14:38 +05302566 page_info->last_frag = true;
2567 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2568 } else {
2569 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002570 }
Sathya Perla26d92f92010-01-21 22:52:08 -08002571
2572 prev_page_info = page_info;
2573 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002574 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002575 }
Sathya Perlae50287b2014-03-04 12:14:38 +05302576
2577 /* Mark the last frag of a page when we break out of the above loop
2578 * with no more slots available in the RXQ
2579 */
2580 if (pagep) {
2581 prev_page_info->last_frag = true;
2582 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2583 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002584
2585 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002586 atomic_add(posted, &rxq->used);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302587 if (rxo->rx_post_starved)
2588 rxo->rx_post_starved = false;
Ajit Khapardec30d7262014-09-12 17:39:16 +05302589 do {
Ajit Khaparde69304cc2015-04-08 16:59:48 -05002590 notify = min(MAX_NUM_POST_ERX_DB, posted);
Ajit Khapardec30d7262014-09-12 17:39:16 +05302591 be_rxq_notify(adapter, rxq->id, notify);
2592 posted -= notify;
2593 } while (posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07002594 } else if (atomic_read(&rxq->used) == 0) {
2595 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07002596 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002597 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002598}
2599
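/*
 * Editor's sketch (illustrative only): be_post_rx_frags() above carves
 * each compound "big page" into rx_frag_size chunks, retires the page
 * when the next chunk would no longer fit, and rings the doorbell in
 * bounded batches. The two arithmetic decisions in isolation; demo_*
 * names are hypothetical.
 */

/* True when 'offset' is the last usable chunk, i.e. the driver must
 * set page_info->last_frag and start a fresh page on the next WRB.
 */
static bool demo_page_exhausted(u32 offset, u32 frag_size, u32 page_size)
{
	return offset + 2 * frag_size > page_size;
}

/* Notify 'posted' new buffers in chunks of at most 'max_per_db',
 * mirroring the MAX_NUM_POST_ERX_DB loop above.
 */
static void demo_ring_doorbell(u32 posted, u32 max_per_db,
			       void (*notify)(u32 count))
{
	while (posted) {
		u32 n = min(max_per_db, posted);

		notify(n);
		posted -= n;
	}
}
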
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302600static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002601{
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302602 struct be_queue_info *tx_cq = &txo->cq;
2603 struct be_tx_compl_info *txcp = &txo->txcp;
2604 struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002605
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302606 if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002607 return NULL;
2608
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302609 /* Ensure load ordering of valid bit dword and other dwords below */
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00002610 rmb();
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302611 be_dws_le_to_cpu(compl, sizeof(*compl));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002612
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302613 txcp->status = GET_TX_COMPL_BITS(status, compl);
2614 txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002615
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302616 compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002617 queue_tail_inc(tx_cq);
2618 return txcp;
2619}
2620
Sathya Perla3c8def92011-06-12 20:01:58 +00002621static u16 be_tx_compl_process(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302622 struct be_tx_obj *txo, u16 last_index)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002623{
Sathya Perla3c8def92011-06-12 20:01:58 +00002624 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002625 struct be_queue_info *txq = &txo->q;
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002626 struct sk_buff *skb = NULL;
2627 bool unmap_skb_hdr = false;
2628 struct be_eth_wrb *wrb;
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +05302629 u16 num_wrbs = 0;
2630 u32 frag_index;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002631
Sathya Perlaec43b1a2010-03-22 20:41:34 +00002632 do {
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002633 if (sent_skbs[txq->tail]) {
2634 /* Free skb from prev req */
2635 if (skb)
2636 dev_consume_skb_any(skb);
2637 skb = sent_skbs[txq->tail];
2638 sent_skbs[txq->tail] = NULL;
2639 queue_tail_inc(txq); /* skip hdr wrb */
2640 num_wrbs++;
2641 unmap_skb_hdr = true;
2642 }
Alexander Duycka73b7962009-12-02 16:48:18 +00002643 wrb = queue_tail_node(txq);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002644 frag_index = txq->tail;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002645 unmap_tx_frag(&adapter->pdev->dev, wrb,
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002646 (unmap_skb_hdr && skb_headlen(skb)));
Sathya Perlaec43b1a2010-03-22 20:41:34 +00002647 unmap_skb_hdr = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002648 queue_tail_inc(txq);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002649 num_wrbs++;
2650 } while (frag_index != last_index);
2651 dev_consume_skb_any(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002652
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00002653 return num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002654}
2655
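/*
 * Editor's sketch (illustrative only): a TX completion reports only the
 * index of the request's last WRB, so be_tx_compl_process() walks the
 * tail forward to that index, counting WRBs reclaimed (header WRB
 * included). A minimal model of the walk; demo_reclaim_wrbs is
 * hypothetical.
 */
static u16 demo_reclaim_wrbs(u16 *tail, u16 last_index, u16 qlen)
{
	u16 num_wrbs = 0;
	u16 idx;

	do {
		idx = *tail;			/* frag being reclaimed */
		*tail = (*tail + 1) % qlen;	/* queue_tail_inc() */
		num_wrbs++;
	} while (idx != last_index);

	return num_wrbs;
}
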
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002656/* Return the number of events in the event queue */
2657static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00002658{
2659 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002660 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00002661
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002662 do {
2663 eqe = queue_tail_node(&eqo->q);
2664 if (eqe->evt == 0)
2665 break;
2666
2667 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00002668 eqe->evt = 0;
2669 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002670 queue_tail_inc(&eqo->q);
2671 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002672
2673 return num;
2674}
2675
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002676/* Leaves the EQ in disarmed state */
2677static void be_eq_clean(struct be_eq_obj *eqo)
2678{
2679 int num = events_get(eqo);
2680
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002681 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002682}
2683
Kalesh AP99b44302015-08-05 03:27:49 -04002684/* Free posted rx buffers that were not used */
2685static void be_rxq_clean(struct be_rx_obj *rxo)
2686{
2687 struct be_queue_info *rxq = &rxo->q;
2688 struct be_rx_page_info *page_info;
2689
2690 while (atomic_read(&rxq->used) > 0) {
2691 page_info = get_rx_page_info(rxo);
2692 put_page(page_info->page);
2693 memset(page_info, 0, sizeof(*page_info));
2694 }
2695 BUG_ON(atomic_read(&rxq->used));
2696 rxq->tail = 0;
2697 rxq->head = 0;
2698}
2699
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002700static void be_rx_cq_clean(struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002701{
Sathya Perla3abcded2010-10-03 22:12:27 -07002702 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00002703 struct be_rx_compl_info *rxcp;
Sathya Perlad23e9462012-12-17 19:38:51 +00002704 struct be_adapter *adapter = rxo->adapter;
2705 int flush_wait = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002706
Sathya Perlad23e9462012-12-17 19:38:51 +00002707 /* Consume pending rx completions.
2708 * Wait for the flush completion (identified by zero num_rcvd)
2709 * to arrive. Notify CQ even when there are no more CQ entries
2710 * for HW to flush partially coalesced CQ entries.
2711 * In Lancer, there is no need to wait for flush compl.
2712 */
2713 for (;;) {
2714 rxcp = be_rx_compl_get(rxo);
Kalesh APddf11692014-07-17 16:20:28 +05302715 if (!rxcp) {
Sathya Perlad23e9462012-12-17 19:38:51 +00002716 if (lancer_chip(adapter))
2717 break;
2718
Venkata Duvvuru954f6822015-05-13 13:00:13 +05302719 if (flush_wait++ > 50 ||
2720 be_check_error(adapter,
2721 BE_ERROR_HW)) {
Sathya Perlad23e9462012-12-17 19:38:51 +00002722 dev_warn(&adapter->pdev->dev,
2723 "did not receive flush compl\n");
2724 break;
2725 }
2726 be_cq_notify(adapter, rx_cq->id, true, 0);
2727 mdelay(1);
2728 } else {
2729 be_rx_compl_discard(rxo, rxcp);
Sathya Perla3f5dffe2013-05-08 02:05:49 +00002730 be_cq_notify(adapter, rx_cq->id, false, 1);
Sathya Perlad23e9462012-12-17 19:38:51 +00002731 if (rxcp->num_rcvd == 0)
2732 break;
2733 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002734 }
2735
Sathya Perlad23e9462012-12-17 19:38:51 +00002736 /* After cleanup, leave the CQ in unarmed state */
2737 be_cq_notify(adapter, rx_cq->id, false, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002738}
2739
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002740static void be_tx_compl_clean(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002741{
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002742 struct device *dev = &adapter->pdev->dev;
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +05302743 u16 cmpl = 0, timeo = 0, num_wrbs = 0;
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302744 struct be_tx_compl_info *txcp;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002745 struct be_queue_info *txq;
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +05302746 u32 end_idx, notified_idx;
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302747 struct be_tx_obj *txo;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002748 int i, pending_txqs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002749
Vasundhara Volam1a3d0712014-04-14 16:12:40 +05302750 /* Stop polling for compls when HW has been silent for 10ms */
Sathya Perlaa8e91792009-08-10 03:42:43 +00002751 do {
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002752 pending_txqs = adapter->num_tx_qs;
2753
2754 for_all_tx_queues(adapter, txo, i) {
Vasundhara Volam1a3d0712014-04-14 16:12:40 +05302755 cmpl = 0;
2756 num_wrbs = 0;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002757 txq = &txo->q;
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302758 while ((txcp = be_tx_compl_get(txo))) {
2759 num_wrbs +=
2760 be_tx_compl_process(adapter, txo,
2761 txcp->end_index);
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002762 cmpl++;
2763 }
2764 if (cmpl) {
2765 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2766 atomic_sub(num_wrbs, &txq->used);
Vasundhara Volam1a3d0712014-04-14 16:12:40 +05302767 timeo = 0;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002768 }
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +05302769 if (!be_is_tx_compl_pending(txo))
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002770 pending_txqs--;
Sathya Perlaa8e91792009-08-10 03:42:43 +00002771 }
2772
Venkata Duvvuru954f6822015-05-13 13:00:13 +05302773 if (pending_txqs == 0 || ++timeo > 10 ||
2774 be_check_error(adapter, BE_ERROR_HW))
Sathya Perlaa8e91792009-08-10 03:42:43 +00002775 break;
2776
2777 mdelay(1);
2778 } while (true);
2779
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002780 /* Free enqueued TX that was never notified to HW */
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002781 for_all_tx_queues(adapter, txo, i) {
2782 txq = &txo->q;
Sathya Perlab03388d2010-02-18 00:37:17 +00002783
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002784 if (atomic_read(&txq->used)) {
2785 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2786 i, atomic_read(&txq->used));
2787 notified_idx = txq->tail;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002788 end_idx = txq->tail;
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002789 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2790 txq->len);
2791 /* Use the tx-compl process logic to handle requests
2792 * that were not sent to the HW.
2793 */
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002794 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2795 atomic_sub(num_wrbs, &txq->used);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002796 BUG_ON(atomic_read(&txq->used));
2797 txo->pend_wrb_cnt = 0;
2798 /* Since hw was never notified of these requests,
2799 * reset TXQ indices
2800 */
2801 txq->head = notified_idx;
2802 txq->tail = notified_idx;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002803 }
Sathya Perlab03388d2010-02-18 00:37:17 +00002804 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002805}
2806
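/*
 * Editor's sketch (illustrative only): for requests that were enqueued
 * but never notified to HW, the cleanup above locates the last WRB by
 * advancing the tail by (used - 1) with wrap-around -- the job of
 * index_adv(). For a plain modulo ring it reduces to the following;
 * demo_index_adv is hypothetical.
 */
static void demo_index_adv(u32 *index, u32 count, u32 qlen)
{
	*index = (*index + count) % qlen;
}
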
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002807static void be_evt_queues_destroy(struct be_adapter *adapter)
2808{
2809 struct be_eq_obj *eqo;
2810 int i;
2811
2812 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002813 if (eqo->q.created) {
2814 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002815 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302816 netif_napi_del(&eqo->napi);
Kalesh AP649886a2015-08-05 03:27:50 -04002817 free_cpumask_var(eqo->affinity_mask);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002818 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002819 be_queue_free(adapter, &eqo->q);
2820 }
2821}
2822
2823static int be_evt_queues_create(struct be_adapter *adapter)
2824{
2825 struct be_queue_info *eq;
2826 struct be_eq_obj *eqo;
Sathya Perla2632baf2013-10-01 16:00:00 +05302827 struct be_aic_obj *aic;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002828 int i, rc;
2829
Sathya Perlae2617682016-06-22 08:54:54 -04002830 /* need enough EQs to service both RX and TX queues */
Sathya Perla92bf14a2013-08-27 16:57:32 +05302831 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
Sathya Perlae2617682016-06-22 08:54:54 -04002832 max(adapter->cfg_num_rx_irqs,
2833 adapter->cfg_num_tx_irqs));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002834
2835 for_all_evt_queues(adapter, eqo, i) {
Rusty Russellf36963c2015-05-09 03:14:13 +09302836 int numa_node = dev_to_node(&adapter->pdev->dev);
Kalesh AP649886a2015-08-05 03:27:50 -04002837
Sathya Perla2632baf2013-10-01 16:00:00 +05302838 aic = &adapter->aic_obj[i];
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002839 eqo->adapter = adapter;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002840 eqo->idx = i;
Sathya Perla2632baf2013-10-01 16:00:00 +05302841 aic->max_eqd = BE_MAX_EQD;
2842 aic->enable = true;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002843
2844 eq = &eqo->q;
2845 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302846 sizeof(struct be_eq_entry));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002847 if (rc)
2848 return rc;
2849
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302850 rc = be_cmd_eq_create(adapter, eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002851 if (rc)
2852 return rc;
Kalesh AP649886a2015-08-05 03:27:50 -04002853
2854 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2855 return -ENOMEM;
2856 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2857 eqo->affinity_mask);
2858 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2859 BE_NAPI_WEIGHT);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002860 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00002861 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002862}
2863
Sathya Perla5fb379e2009-06-18 00:02:59 +00002864static void be_mcc_queues_destroy(struct be_adapter *adapter)
2865{
2866 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002867
Sathya Perla8788fdc2009-07-27 22:52:03 +00002868 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002869 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002870 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002871 be_queue_free(adapter, q);
2872
Sathya Perla8788fdc2009-07-27 22:52:03 +00002873 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002874 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002875 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002876 be_queue_free(adapter, q);
2877}
2878
2879/* Must be called only after TX qs are created as MCC shares TX EQ */
2880static int be_mcc_queues_create(struct be_adapter *adapter)
2881{
2882 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002883
Sathya Perla8788fdc2009-07-27 22:52:03 +00002884 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002885 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302886 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002887 goto err;
2888
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002889 /* Use the default EQ for MCC completions */
2890 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002891 goto mcc_cq_free;
2892
Sathya Perla8788fdc2009-07-27 22:52:03 +00002893 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002894 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2895 goto mcc_cq_destroy;
2896
Sathya Perla8788fdc2009-07-27 22:52:03 +00002897 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002898 goto mcc_q_free;
2899
2900 return 0;
2901
2902mcc_q_free:
2903 be_queue_free(adapter, q);
2904mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00002905 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002906mcc_cq_free:
2907 be_queue_free(adapter, cq);
2908err:
2909 return -1;
2910}
2911
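/*
 * Editor's sketch (illustrative only): be_mcc_queues_create() above uses
 * the classic goto-unwind idiom -- each failure jumps to a label that
 * releases exactly what was acquired before it, in reverse order. The
 * shape in isolation, with hypothetical demo_* stand-ins.
 */
static int demo_acquire_a(void) { return 0; }
static int demo_acquire_b(void) { return 0; }
static void demo_release_a(void) { }

static int demo_create_pair(void)
{
	if (demo_acquire_a())
		goto err;	/* nothing acquired yet */
	if (demo_acquire_b())
		goto release_a;	/* undo A only */
	return 0;

release_a:
	demo_release_a();
err:
	return -1;
}
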
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002912static void be_tx_queues_destroy(struct be_adapter *adapter)
2913{
2914 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002915 struct be_tx_obj *txo;
2916 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002917
Sathya Perla3c8def92011-06-12 20:01:58 +00002918 for_all_tx_queues(adapter, txo, i) {
2919 q = &txo->q;
2920 if (q->created)
2921 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2922 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002923
Sathya Perla3c8def92011-06-12 20:01:58 +00002924 q = &txo->cq;
2925 if (q->created)
2926 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2927 be_queue_free(adapter, q);
2928 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002929}
2930
Sathya Perla77071332013-08-27 16:57:34 +05302931static int be_tx_qs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002932{
Sathya Perla73f394e2015-03-26 03:05:09 -04002933 struct be_queue_info *cq;
Sathya Perla3c8def92011-06-12 20:01:58 +00002934 struct be_tx_obj *txo;
Sathya Perla73f394e2015-03-26 03:05:09 -04002935 struct be_eq_obj *eqo;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302936 int status, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002937
Sathya Perlae2617682016-06-22 08:54:54 -04002938 adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);
Sathya Perladafc0fe2011-10-24 02:45:02 +00002939
Sathya Perla3c8def92011-06-12 20:01:58 +00002940 for_all_tx_queues(adapter, txo, i) {
2941 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002942 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2943 sizeof(struct be_eth_tx_compl));
2944 if (status)
2945 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002946
John Stultz827da442013-10-07 15:51:58 -07002947 u64_stats_init(&txo->stats.sync);
2948 u64_stats_init(&txo->stats.sync_compl);
2949
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002950 /* If num_evt_qs is less than num_tx_qs, then more than
 2951	 * one txq shares an eq
2952 */
Sathya Perla73f394e2015-03-26 03:05:09 -04002953 eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
2954 status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002955 if (status)
2956 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002957
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002958 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2959 sizeof(struct be_eth_wrb));
2960 if (status)
2961 return status;
2962
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00002963 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002964 if (status)
2965 return status;
Sathya Perla73f394e2015-03-26 03:05:09 -04002966
2967 netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
2968 eqo->idx);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002969 }
2970
Sathya Perlad3791422012-09-28 04:39:44 +00002971 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2972 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002973 return 0;
2974}
2975
2976static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002977{
2978 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002979 struct be_rx_obj *rxo;
2980 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002981
Sathya Perla3abcded2010-10-03 22:12:27 -07002982 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002983 q = &rxo->cq;
2984 if (q->created)
2985 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2986 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002987 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002988}
2989
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002990static int be_rx_cqs_create(struct be_adapter *adapter)
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002991{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002992 struct be_queue_info *eq, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002993 struct be_rx_obj *rxo;
2994 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002995
Sathya Perlae2617682016-06-22 08:54:54 -04002996 adapter->num_rss_qs =
2997 min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);
Sathya Perla92bf14a2013-08-27 16:57:32 +05302998
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05002999	/* We'll use RSS only if at least 2 RSS rings are supported. */
Sathya Perlae2617682016-06-22 08:54:54 -04003000 if (adapter->num_rss_qs < 2)
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003001 adapter->num_rss_qs = 0;
3002
3003 adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
3004
 3005	/* Even when the interface is not capable of RSS rings (and there is
 3006	 * no need to create a default RXQ), we still need one RXQ
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003007 */
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003008 if (adapter->num_rx_qs == 0)
3009 adapter->num_rx_qs = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05303010
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003011 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07003012 for_all_rx_queues(adapter, rxo, i) {
3013 rxo->adapter = adapter;
Sathya Perla3abcded2010-10-03 22:12:27 -07003014 cq = &rxo->cq;
3015 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05303016 sizeof(struct be_eth_rx_compl));
Sathya Perla3abcded2010-10-03 22:12:27 -07003017 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003018 return rc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003019
John Stultz827da442013-10-07 15:51:58 -07003020 u64_stats_init(&rxo->stats.sync);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003021 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
3022 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
Sathya Perla3abcded2010-10-03 22:12:27 -07003023 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003024 return rc;
Sathya Perla3abcded2010-10-03 22:12:27 -07003025 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003026
Sathya Perlad3791422012-09-28 04:39:44 +00003027 dev_info(&adapter->pdev->dev,
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003028 "created %d RX queue(s)\n", adapter->num_rx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003029 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00003030}
3031
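/*
 * Editor's sketch (illustrative only): the RXQ sizing above counts RSS
 * rings only when at least two are available, adds an optional default
 * RXQ, and enforces a floor of one queue. demo_num_rx_qs is
 * hypothetical.
 */
static int demo_num_rx_qs(int rss_capable_qs, bool need_def_rxq)
{
	int rss_qs = rss_capable_qs >= 2 ? rss_capable_qs : 0;
	int total = rss_qs + (need_def_rxq ? 1 : 0);

	return total ? total : 1;
}
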
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003032static irqreturn_t be_intx(int irq, void *dev)
3033{
Sathya Perlae49cc342012-11-27 19:50:02 +00003034 struct be_eq_obj *eqo = dev;
3035 struct be_adapter *adapter = eqo->adapter;
3036 int num_evts = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003037
Sathya Perlad0b9cec2013-01-11 22:47:02 +00003038 /* IRQ is not expected when NAPI is scheduled as the EQ
3039 * will not be armed.
3040 * But, this can happen on Lancer INTx where it takes
 3041	 * a while to de-assert INTx or in BE2 where occasionally
3042 * an interrupt may be raised even when EQ is unarmed.
3043 * If NAPI is already scheduled, then counting & notifying
3044 * events will orphan them.
Sathya Perlae49cc342012-11-27 19:50:02 +00003045 */
Sathya Perlad0b9cec2013-01-11 22:47:02 +00003046 if (napi_schedule_prep(&eqo->napi)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00003047 num_evts = events_get(eqo);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00003048 __napi_schedule(&eqo->napi);
3049 if (num_evts)
3050 eqo->spurious_intr = 0;
3051 }
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003052 be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00003053
 3054	/* Return IRQ_HANDLED only for the first spurious intr
3055 * after a valid intr to stop the kernel from branding
3056 * this irq as a bad one!
3057 */
3058 if (num_evts || eqo->spurious_intr++ == 0)
3059 return IRQ_HANDLED;
3060 else
3061 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003062}
3063
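/*
 * Editor's sketch (illustrative only): the INTx handler above claims the
 * first spurious interrupt after a valid one (so the kernel does not
 * brand the line as bad and disable it) and reports IRQ_NONE for
 * repeats. The decision in isolation; demo_claim_irq is hypothetical.
 */
static bool demo_claim_irq(int num_evts, u32 *spurious_cnt)
{
	if (num_evts) {
		*spurious_cnt = 0;	/* a real event resets the count */
		return true;		/* IRQ_HANDLED */
	}
	return (*spurious_cnt)++ == 0;	/* claim only the first spurious */
}
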
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003064static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003065{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003066 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003067
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003068 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
Sathya Perla0b545a62012-11-23 00:27:18 +00003069 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003070 return IRQ_HANDLED;
3071}
3072
Sathya Perla2e588f82011-03-11 02:49:26 +00003073static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003074{
Somnath Koture38b1702013-05-29 22:55:56 +00003075 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003076}
3077
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003078static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
Sathya Perla748b5392014-05-09 13:29:13 +05303079 int budget, int polling)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003080{
Sathya Perla3abcded2010-10-03 22:12:27 -07003081 struct be_adapter *adapter = rxo->adapter;
3082 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00003083 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003084 u32 work_done;
Ajit Khapardec30d7262014-09-12 17:39:16 +05303085 u32 frags_consumed = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003086
3087 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07003088 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003089 if (!rxcp)
3090 break;
3091
Sathya Perla12004ae2011-08-02 19:57:46 +00003092 /* Is it a flush compl that has no data */
3093 if (unlikely(rxcp->num_rcvd == 0))
3094 goto loop_continue;
3095
3096 /* Discard compl with partial DMA Lancer B0 */
3097 if (unlikely(!rxcp->pkt_size)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003098 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00003099 goto loop_continue;
Sathya Perla64642812010-12-01 01:04:17 +00003100 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00003101
Sathya Perla12004ae2011-08-02 19:57:46 +00003102 /* On BE drop pkts that arrive due to imperfect filtering in
 3103	 * promiscuous mode on some SKUs
3104 */
3105 if (unlikely(rxcp->port != adapter->port_num &&
Sathya Perla748b5392014-05-09 13:29:13 +05303106 !lancer_chip(adapter))) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003107 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00003108 goto loop_continue;
3109 }
3110
Sathya Perla6384a4d2013-10-25 10:40:16 +05303111		/* Don't do GRO when we're busy-polling */
3112 if (do_gro(rxcp) && polling != BUSY_POLLING)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003113 be_rx_compl_process_gro(rxo, napi, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00003114 else
Sathya Perla6384a4d2013-10-25 10:40:16 +05303115 be_rx_compl_process(rxo, napi, rxcp);
3116
Sathya Perla12004ae2011-08-02 19:57:46 +00003117loop_continue:
Ajit Khapardec30d7262014-09-12 17:39:16 +05303118 frags_consumed += rxcp->num_rcvd;
Sathya Perla2e588f82011-03-11 02:49:26 +00003119 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003120 }
3121
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003122 if (work_done) {
3123 be_cq_notify(adapter, rx_cq->id, true, work_done);
Padmanabh Ratnakar9372cac2011-11-03 01:49:55 +00003124
Sathya Perla6384a4d2013-10-25 10:40:16 +05303125 /* When an rx-obj gets into post_starved state, just
3126 * let be_worker do the posting.
3127 */
3128 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
3129 !rxo->rx_post_starved)
Ajit Khapardec30d7262014-09-12 17:39:16 +05303130 be_post_rx_frags(rxo, GFP_ATOMIC,
3131 max_t(u32, MAX_RX_POST,
3132 frags_consumed));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003133 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003134
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003135 return work_done;
3136}
3137
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303138static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05303139{
3140 switch (status) {
3141 case BE_TX_COMP_HDR_PARSE_ERR:
3142 tx_stats(txo)->tx_hdr_parse_err++;
3143 break;
3144 case BE_TX_COMP_NDMA_ERR:
3145 tx_stats(txo)->tx_dma_err++;
3146 break;
3147 case BE_TX_COMP_ACL_ERR:
3148 tx_stats(txo)->tx_spoof_check_err++;
3149 break;
3150 }
3151}
3152
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303153static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05303154{
3155 switch (status) {
3156 case LANCER_TX_COMP_LSO_ERR:
3157 tx_stats(txo)->tx_tso_err++;
3158 break;
3159 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
3160 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
3161 tx_stats(txo)->tx_spoof_check_err++;
3162 break;
3163 case LANCER_TX_COMP_QINQ_ERR:
3164 tx_stats(txo)->tx_qinq_err++;
3165 break;
3166 case LANCER_TX_COMP_PARITY_ERR:
3167 tx_stats(txo)->tx_internal_parity_err++;
3168 break;
3169 case LANCER_TX_COMP_DMA_ERR:
3170 tx_stats(txo)->tx_dma_err++;
3171 break;
3172 }
3173}
3174
Sathya Perlac8f64612014-09-02 09:56:55 +05303175static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
3176 int idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003177{
Sathya Perlac8f64612014-09-02 09:56:55 +05303178 int num_wrbs = 0, work_done = 0;
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303179 struct be_tx_compl_info *txcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003180
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303181 while ((txcp = be_tx_compl_get(txo))) {
3182 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
Sathya Perlac8f64612014-09-02 09:56:55 +05303183 work_done++;
3184
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303185 if (txcp->status) {
Kalesh AP512bb8a2014-09-02 09:56:49 +05303186 if (lancer_chip(adapter))
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303187 lancer_update_tx_err(txo, txcp->status);
Kalesh AP512bb8a2014-09-02 09:56:49 +05303188 else
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303189 be_update_tx_err(txo, txcp->status);
Kalesh AP512bb8a2014-09-02 09:56:49 +05303190 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003191 }
3192
3193 if (work_done) {
3194 be_cq_notify(adapter, txo->cq.id, true, work_done);
3195 atomic_sub(num_wrbs, &txo->q.used);
3196
3197 /* As Tx wrbs have been freed up, wake up netdev queue
3198 * if it was stopped due to lack of tx wrbs. */
3199 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +05303200 be_can_txq_wake(txo)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003201 netif_wake_subqueue(adapter->netdev, idx);
Sathya Perla3c8def92011-06-12 20:01:58 +00003202 }
Sathya Perla3c8def92011-06-12 20:01:58 +00003203
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003204 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
3205 tx_stats(txo)->tx_compl += work_done;
3206 u64_stats_update_end(&tx_stats(txo)->sync_compl);
3207 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003208}
Sathya Perla3c8def92011-06-12 20:01:58 +00003209
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003210#ifdef CONFIG_NET_RX_BUSY_POLL
3211static inline bool be_lock_napi(struct be_eq_obj *eqo)
3212{
3213 bool status = true;
3214
3215 spin_lock(&eqo->lock); /* BH is already disabled */
3216 if (eqo->state & BE_EQ_LOCKED) {
3217 WARN_ON(eqo->state & BE_EQ_NAPI);
3218 eqo->state |= BE_EQ_NAPI_YIELD;
3219 status = false;
3220 } else {
3221 eqo->state = BE_EQ_NAPI;
3222 }
3223 spin_unlock(&eqo->lock);
3224 return status;
3225}
3226
3227static inline void be_unlock_napi(struct be_eq_obj *eqo)
3228{
3229 spin_lock(&eqo->lock); /* BH is already disabled */
3230
3231 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
3232 eqo->state = BE_EQ_IDLE;
3233
3234 spin_unlock(&eqo->lock);
3235}
3236
3237static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
3238{
3239 bool status = true;
3240
3241 spin_lock_bh(&eqo->lock);
3242 if (eqo->state & BE_EQ_LOCKED) {
3243 eqo->state |= BE_EQ_POLL_YIELD;
3244 status = false;
3245 } else {
3246 eqo->state |= BE_EQ_POLL;
3247 }
3248 spin_unlock_bh(&eqo->lock);
3249 return status;
3250}
3251
3252static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
3253{
3254 spin_lock_bh(&eqo->lock);
3255
3256 WARN_ON(eqo->state & (BE_EQ_NAPI));
3257 eqo->state = BE_EQ_IDLE;
3258
3259 spin_unlock_bh(&eqo->lock);
3260}
3261
3262static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
3263{
3264 spin_lock_init(&eqo->lock);
3265 eqo->state = BE_EQ_IDLE;
3266}
3267
3268static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
3269{
3270 local_bh_disable();
3271
3272 /* It's enough to just acquire napi lock on the eqo to stop
 3273	 * be_busy_poll() from processing any queues.
3274 */
3275 while (!be_lock_napi(eqo))
3276 mdelay(1);
3277
3278 local_bh_enable();
3279}
3280
3281#else /* CONFIG_NET_RX_BUSY_POLL */
3282
3283static inline bool be_lock_napi(struct be_eq_obj *eqo)
3284{
3285 return true;
3286}
3287
3288static inline void be_unlock_napi(struct be_eq_obj *eqo)
3289{
3290}
3291
3292static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
3293{
3294 return false;
3295}
3296
3297static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
3298{
3299}
3300
3301static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
3302{
3303}
3304
3305static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
3306{
3307}
3308#endif /* CONFIG_NET_RX_BUSY_POLL */
3309
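/*
 * Editor's sketch (illustrative only): the helpers above arbitrate each
 * EQ between the NAPI handler and socket busy-poll using a lock-protected
 * state word -- IDLE can be claimed by either side, and a loser records a
 * *_YIELD flag so the owner knows it was contended. The acquire path in
 * isolation (caller holds the eqo lock); demo_try_own is hypothetical.
 */
static bool demo_try_own(u32 *state, u32 owner_bit, u32 yield_bit,
			 u32 locked_mask)
{
	if (*state & locked_mask) {	/* the other side owns the EQ */
		*state |= yield_bit;	/* record that we backed off */
		return false;
	}
	*state = owner_bit;		/* take sole ownership */
	return true;
}
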
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303310int be_poll(struct napi_struct *napi, int budget)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003311{
3312 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3313 struct be_adapter *adapter = eqo->adapter;
Sathya Perla0b545a62012-11-23 00:27:18 +00003314 int max_work = 0, work, i, num_evts;
Sathya Perla6384a4d2013-10-25 10:40:16 +05303315 struct be_rx_obj *rxo;
Sathya Perlaa4906ea2014-09-02 09:56:56 +05303316 struct be_tx_obj *txo;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003317 u32 mult_enc = 0;
Sathya Perla3c8def92011-06-12 20:01:58 +00003318
Sathya Perla0b545a62012-11-23 00:27:18 +00003319 num_evts = events_get(eqo);
3320
Sathya Perlaa4906ea2014-09-02 09:56:56 +05303321 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
3322 be_process_tx(adapter, txo, i);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003323
Sathya Perla6384a4d2013-10-25 10:40:16 +05303324 if (be_lock_napi(eqo)) {
3325 /* This loop will iterate twice for EQ0 in which
3326 * completions of the last RXQ (default one) are also processed
3327 * For other EQs the loop iterates only once
3328 */
3329 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3330 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
3331 max_work = max(work, max_work);
3332 }
3333 be_unlock_napi(eqo);
3334 } else {
3335 max_work = budget;
Sathya Perlaf31e50a2010-03-02 03:56:39 -08003336 }
3337
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003338 if (is_mcc_eqo(eqo))
3339 be_process_mcc(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00003340
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003341 if (max_work < budget) {
3342 napi_complete(napi);
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003343
 3344	/* Skyhawk EQ_DB has a provision to set the rearm-to-interrupt
3345 * delay via a delay multiplier encoding value
3346 */
3347 if (skyhawk_chip(adapter))
3348 mult_enc = be_get_eq_delay_mult_enc(eqo);
3349
3350 be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
3351 mult_enc);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003352 } else {
3353 /* As we'll continue in polling mode, count and clear events */
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003354 be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
Padmanabh Ratnakar93c86702011-12-19 01:53:35 +00003355 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003356 return max_work;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003357}
3358
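/*
 * Editor's sketch (illustrative only): be_poll() above follows the
 * standard NAPI contract -- complete and re-arm the EQ only when less
 * than the full budget was consumed; otherwise stay in polling mode and
 * merely acknowledge the events already counted. The decision in
 * isolation; demo_napi_done is hypothetical.
 */
static bool demo_napi_done(int max_work, int budget)
{
	/* true  -> napi_complete() + re-arm EQ (interrupts resume)
	 * false -> remain in softirq polling, ack events only
	 */
	return max_work < budget;
}
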
Sathya Perla6384a4d2013-10-25 10:40:16 +05303359#ifdef CONFIG_NET_RX_BUSY_POLL
3360static int be_busy_poll(struct napi_struct *napi)
3361{
3362 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3363 struct be_adapter *adapter = eqo->adapter;
3364 struct be_rx_obj *rxo;
3365 int i, work = 0;
3366
3367 if (!be_lock_busy_poll(eqo))
3368 return LL_FLUSH_BUSY;
3369
3370 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3371 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
3372 if (work)
3373 break;
3374 }
3375
3376 be_unlock_busy_poll(eqo);
3377 return work;
3378}
3379#endif
3380
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003381void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00003382{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003383 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3384 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00003385 u32 i;
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303386 struct device *dev = &adapter->pdev->dev;
Ajit Khaparde7c185272010-07-29 06:16:33 +00003387
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303388 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perla72f02482011-11-10 19:17:58 +00003389 return;
3390
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003391 if (lancer_chip(adapter)) {
3392 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3393 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303394 be_set_error(adapter, BE_ERROR_UE);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003395 sliport_err1 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05303396 SLIPORT_ERROR1_OFFSET);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003397 sliport_err2 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05303398 SLIPORT_ERROR2_OFFSET);
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303399 /* Do not log error messages if its a FW reset */
3400 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3401 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3402 dev_info(dev, "Firmware update in progress\n");
3403 } else {
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303404 dev_err(dev, "Error detected in the card\n");
3405 dev_err(dev, "ERR: sliport status 0x%x\n",
3406 sliport_status);
3407 dev_err(dev, "ERR: sliport error1 0x%x\n",
3408 sliport_err1);
3409 dev_err(dev, "ERR: sliport error2 0x%x\n",
3410 sliport_err2);
3411 }
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003412 }
3413 } else {
Suresh Reddy25848c92015-03-20 06:28:25 -04003414 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3415 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3416 ue_lo_mask = ioread32(adapter->pcicfg +
3417 PCICFG_UE_STATUS_LOW_MASK);
3418 ue_hi_mask = ioread32(adapter->pcicfg +
3419 PCICFG_UE_STATUS_HI_MASK);
Ajit Khaparde7c185272010-07-29 06:16:33 +00003420
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003421 ue_lo = (ue_lo & ~ue_lo_mask);
3422 ue_hi = (ue_hi & ~ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00003423
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303424 /* On certain platforms BE hardware can indicate spurious UEs.
 3425	 * Allow the HW to stop working completely in case of a real UE;
 3426	 * hence hw_error is not set on UE detection alone.
3427 */
3428
3429 if (ue_lo || ue_hi) {
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05303430		dev_err(dev, "Error detected in the adapter\n");
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303431 if (skyhawk_chip(adapter))
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303432 be_set_error(adapter, BE_ERROR_UE);
3433
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303434 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3435 if (ue_lo & 1)
3436 dev_err(dev, "UE: %s bit set\n",
3437 ue_status_low_desc[i]);
3438 }
3439 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3440 if (ue_hi & 1)
3441 dev_err(dev, "UE: %s bit set\n",
3442 ue_status_hi_desc[i]);
3443 }
Somnath Kotur4bebb562013-12-05 12:07:55 +05303444 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003445 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00003446}
3447
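/*
 * Editor's sketch (illustrative only): the UE decode above masks out
 * bits the platform declares expected, then walks whatever remains,
 * reporting a description per set bit. The walk in isolation;
 * demo_log_ue_bits and its callback are hypothetical.
 */
static void demo_log_ue_bits(u32 status, u32 expected_mask,
			     void (*report)(int bit))
{
	u32 ue = status & ~expected_mask;
	int i;

	for (i = 0; ue; ue >>= 1, i++)
		if (ue & 1)
			report(i);	/* e.g. ue_status_low_desc[i] */
}
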
Sathya Perla8d56ff12009-11-22 22:02:26 +00003448static void be_msix_disable(struct be_adapter *adapter)
3449{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003450 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00003451 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003452 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303453 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003454 }
3455}
3456
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003457static int be_msix_enable(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003458{
Dan Carpenter6fde0e62016-06-29 17:39:43 +03003459 unsigned int i, max_roce_eqs;
Sathya Perlad3791422012-09-28 04:39:44 +00003460 struct device *dev = &adapter->pdev->dev;
Dan Carpenter6fde0e62016-06-29 17:39:43 +03003461 int num_vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003462
Sathya Perlace7faf02016-06-22 08:54:53 -04003463 /* If RoCE is supported, program the max number of vectors that
 3464	 * could be used for NIC and RoCE; otherwise, just program the number
3465 * we'll use initially.
Sathya Perla92bf14a2013-08-27 16:57:32 +05303466 */
Sathya Perlae2617682016-06-22 08:54:54 -04003467 if (be_roce_supported(adapter)) {
3468 max_roce_eqs =
3469 be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
3470 max_roce_eqs = min(max_roce_eqs, num_online_cpus());
3471 num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
3472 } else {
3473 num_vec = max(adapter->cfg_num_rx_irqs,
3474 adapter->cfg_num_tx_irqs);
3475 }
Sathya Perla3abcded2010-10-03 22:12:27 -07003476
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003477 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003478 adapter->msix_entries[i].entry = i;
3479
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003480 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3481 MIN_MSIX_VECTORS, num_vec);
3482 if (num_vec < 0)
3483 goto fail;
Sathya Perlad3791422012-09-28 04:39:44 +00003484
Sathya Perla92bf14a2013-08-27 16:57:32 +05303485 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3486 adapter->num_msix_roce_vec = num_vec / 2;
3487 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3488 adapter->num_msix_roce_vec);
3489 }
3490
3491 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3492
3493 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3494 adapter->num_msix_vec);
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003495 return 0;
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003496
3497fail:
3498 dev_warn(dev, "MSIx enable failed\n");
3499
3500 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
Kalesh AP18c57c72015-05-06 05:30:38 -04003501 if (be_virtfn(adapter))
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003502 return num_vec;
3503 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003504}
3505
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003506static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303507 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003508{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05303509 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003510}
3511
3512static int be_msix_register(struct be_adapter *adapter)
3513{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003514 struct net_device *netdev = adapter->netdev;
3515 struct be_eq_obj *eqo;
3516 int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003517
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003518 for_all_evt_queues(adapter, eqo, i) {
3519 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3520 vec = be_msix_vec_get(adapter, eqo);
3521 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07003522 if (status)
3523 goto err_msix;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003524
3525 irq_set_affinity_hint(vec, eqo->affinity_mask);
Sathya Perla3abcded2010-10-03 22:12:27 -07003526 }
Sathya Perlab628bde2009-08-17 00:58:26 +00003527
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003528 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003529err_msix:
Venkat Duvvuru6e3cd5f2015-12-18 01:40:50 +05303530 for (i--; i >= 0; i--) {
3531 eqo = &adapter->eq_obj[i];
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003532 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Venkat Duvvuru6e3cd5f2015-12-18 01:40:50 +05303533 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003534 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
Sathya Perla748b5392014-05-09 13:29:13 +05303535 status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003536 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003537 return status;
3538}
3539
3540static int be_irq_register(struct be_adapter *adapter)
3541{
3542 struct net_device *netdev = adapter->netdev;
3543 int status;
3544
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003545 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003546 status = be_msix_register(adapter);
3547 if (status == 0)
3548 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003549 /* INTx is not supported for VF */
Kalesh AP18c57c72015-05-06 05:30:38 -04003550 if (be_virtfn(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003551 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003552 }
3553
Sathya Perlae49cc342012-11-27 19:50:02 +00003554 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003555 netdev->irq = adapter->pdev->irq;
3556 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00003557 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003558 if (status) {
3559 dev_err(&adapter->pdev->dev,
3560 "INTx request IRQ failed - err %d\n", status);
3561 return status;
3562 }
3563done:
3564 adapter->isr_registered = true;
3565 return 0;
3566}
3567
3568static void be_irq_unregister(struct be_adapter *adapter)
3569{
3570 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003571 struct be_eq_obj *eqo;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003572 int i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003573
3574 if (!adapter->isr_registered)
3575 return;
3576
3577 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003578 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00003579 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003580 goto done;
3581 }
3582
3583 /* MSIx */
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003584 for_all_evt_queues(adapter, eqo, i) {
3585 vec = be_msix_vec_get(adapter, eqo);
3586 irq_set_affinity_hint(vec, NULL);
3587 free_irq(vec, eqo);
3588 }
Sathya Perla3abcded2010-10-03 22:12:27 -07003589
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003590done:
3591 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003592}
3593
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003594static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003595{
Ajit Khaparde62219062016-02-10 22:45:53 +05303596 struct rss_info *rss = &adapter->rss_info;
Sathya Perla482c9e72011-06-29 23:33:17 +00003597 struct be_queue_info *q;
3598 struct be_rx_obj *rxo;
3599 int i;
3600
3601 for_all_rx_queues(adapter, rxo, i) {
3602 q = &rxo->q;
3603 if (q->created) {
Kalesh AP99b44302015-08-05 03:27:49 -04003604 /* If RXQs are destroyed while in an "out of buffer"
3605 * state, there is a possibility of an HW stall on
3606 * Lancer. So, post 64 buffers to each queue to relieve
3607 * the "out of buffer" condition.
3608 * Make sure there's space in the RXQ before posting.
3609 */
3610 if (lancer_chip(adapter)) {
3611 be_rx_cq_clean(rxo);
3612 if (atomic_read(&q->used) == 0)
3613 be_post_rx_frags(rxo, GFP_KERNEL,
3614 MAX_RX_POST);
3615 }
3616
Sathya Perla482c9e72011-06-29 23:33:17 +00003617 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003618 be_rx_cq_clean(rxo);
Kalesh AP99b44302015-08-05 03:27:49 -04003619 be_rxq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00003620 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003621 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00003622 }
Ajit Khaparde62219062016-02-10 22:45:53 +05303623
3624 if (rss->rss_flags) {
3625 rss->rss_flags = RSS_ENABLE_NONE;
3626 be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3627 128, rss->rss_hkey);
3628 }
Sathya Perla482c9e72011-06-29 23:33:17 +00003629}
3630
Kalesh APbcc84142015-08-05 03:27:48 -04003631static void be_disable_if_filters(struct be_adapter *adapter)
3632{
Ivan Vecera02434de2017-01-13 22:38:28 +01003633 /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
3634 if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
3635 check_privilege(adapter, BE_PRIV_FILTMGMT))
3636 be_dev_mac_del(adapter, adapter->pmac_id[0]);
3637
Kalesh APbcc84142015-08-05 03:27:48 -04003638 be_clear_uc_list(adapter);
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04003639 be_clear_mc_list(adapter);
Kalesh APbcc84142015-08-05 03:27:48 -04003640
3641 /* The IFACE flags are enabled in the open path and cleared
3642 * in the close path. When a VF gets detached from the host and
3643 * assigned to a VM the following happens:
3644 * - VF's IFACE flags get cleared in the detach path
3645 * - IFACE create is issued by the VF in the attach path
3646 * Due to a bug in the BE3/Skyhawk-R FW
3647 * (Lancer FW doesn't have the bug), the IFACE capability flags
3648 * specified along with the IFACE create cmd issued by a VF are not
3649 * honoured by FW. As a consequence, if a *new* driver
3650 * (that enables/disables IFACE flags in open/close)
3651 * is loaded in the host and an *old* driver is used by a VM/VF,
3652 * the IFACE gets created *without* the needed flags.
3653 * To avoid this, disable RX-filter flags only for Lancer.
3654 */
3655 if (lancer_chip(adapter)) {
3656 be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3657 adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
3658 }
3659}
3660
Sathya Perla889cd4b2010-05-30 23:33:45 +00003661static int be_close(struct net_device *netdev)
3662{
3663 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003664 struct be_eq_obj *eqo;
3665 int i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00003666
Kalesh APe1ad8e32014-04-14 16:12:41 +05303667 /* This protection is needed as be_close() may be called even when the
3668 * adapter is in cleared state (after eeh perm failure)
3669 */
3670 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3671 return 0;
3672
Sathya Perlab7172412016-07-27 05:26:18 -04003673 /* Before attempting cleanup, ensure all the pending cmds in the
3674 * config_wq have finished execution
3675 */
3676 flush_workqueue(be_wq);
3677
Kalesh APbcc84142015-08-05 03:27:48 -04003678 be_disable_if_filters(adapter);
3679
Ivan Veceradff345c52013-11-27 08:59:32 +01003680 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3681 for_all_evt_queues(adapter, eqo, i) {
Somnath Kotur04d3d622013-05-02 03:36:55 +00003682 napi_disable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05303683 be_disable_busy_poll(eqo);
3684 }
David S. Miller71237b62013-11-28 18:53:36 -05003685 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
Somnath Kotur04d3d622013-05-02 03:36:55 +00003686 }
Sathya Perlaa323d9b2012-12-17 19:38:50 +00003687
3688 be_async_mcc_disable(adapter);
3689
3690 /* Wait for all pending tx completions to arrive so that
3691 * all tx skbs are freed.
3692 */
Sathya Perlafba87552013-05-08 02:05:50 +00003693 netif_tx_disable(netdev);
Sathya Perla6e1f9972013-08-22 12:23:41 +05303694 be_tx_compl_clean(adapter);
Sathya Perlaa323d9b2012-12-17 19:38:50 +00003695
3696 be_rx_qs_destroy(adapter);
Ajit Khaparded11a3472013-11-18 10:44:37 -06003697
Sathya Perlaa323d9b2012-12-17 19:38:50 +00003698 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003699 if (msix_enabled(adapter))
3700 synchronize_irq(be_msix_vec_get(adapter, eqo));
3701 else
3702 synchronize_irq(netdev->irq);
3703 be_eq_clean(eqo);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00003704 }
3705
Sathya Perla889cd4b2010-05-30 23:33:45 +00003706 be_irq_unregister(adapter);
3707
Sathya Perla482c9e72011-06-29 23:33:17 +00003708 return 0;
3709}
3710
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003711static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003712{
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003713 struct rss_info *rss = &adapter->rss_info;
3714 u8 rss_key[RSS_HASH_KEY_LEN];
Sathya Perla482c9e72011-06-29 23:33:17 +00003715 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003716 int rc, i, j;
Sathya Perla482c9e72011-06-29 23:33:17 +00003717
3718 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003719 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3720 sizeof(struct be_eth_rx_d));
3721 if (rc)
3722 return rc;
3723 }
3724
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003725 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3726 rxo = default_rxo(adapter);
3727 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3728 rx_frag_size, adapter->if_handle,
3729 false, &rxo->rss_id);
3730 if (rc)
3731 return rc;
3732 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003733
3734 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00003735 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003736 rx_frag_size, adapter->if_handle,
3737 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00003738 if (rc)
3739 return rc;
3740 }
3741
3742 if (be_multi_rxq(adapter)) {
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003743 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003744 for_all_rss_queues(adapter, rxo, i) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05303745 if ((j + i) >= RSS_INDIR_TABLE_LEN)
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003746 break;
Venkata Duvvurue2557872014-04-21 15:38:00 +05303747 rss->rsstable[j + i] = rxo->rss_id;
3748 rss->rss_queue[j + i] = i;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003749 }
3750 }
Venkata Duvvurue2557872014-04-21 15:38:00 +05303751 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3752 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
Suresh Reddy594ad542013-04-25 23:03:20 +00003753
3754 if (!BEx_chip(adapter))
Venkata Duvvurue2557872014-04-21 15:38:00 +05303755 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3756 RSS_ENABLE_UDP_IPV6;
Ajit Khaparde62219062016-02-10 22:45:53 +05303757
3758 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
3759 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3760 RSS_INDIR_TABLE_LEN, rss_key);
3761 if (rc) {
3762 rss->rss_flags = RSS_ENABLE_NONE;
3763 return rc;
3764 }
3765
3766 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303767 } else {
3768 /* Disable RSS if only the default RX Q is created */
Venkata Duvvurue2557872014-04-21 15:38:00 +05303769 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303770 }
Suresh Reddy594ad542013-04-25 23:03:20 +00003771
Venkata Duvvurue2557872014-04-21 15:38:00 +05303772
Suresh Reddyb02e60c2015-05-06 05:30:36 -04003773 /* Post 1 less than RXQ-len to avoid head being equal to tail,
3774 * which is a queue empty condition
3775 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003776 for_all_rx_queues(adapter, rxo, i)
Suresh Reddyb02e60c2015-05-06 05:30:36 -04003777 be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
3778
Sathya Perla889cd4b2010-05-30 23:33:45 +00003779 return 0;
3780}
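/* Illustrative example (not part of the driver, assuming
 * RSS_INDIR_TABLE_LEN is 128): with 4 RSS rings, the nested loops above
 * fill rsstable[] with the four rss_ids in order and repeat that pattern
 * 32 times, so RX flows hash evenly across all RSS queues.
 */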
3781
Kalesh APbcc84142015-08-05 03:27:48 -04003782static int be_enable_if_filters(struct be_adapter *adapter)
3783{
3784 int status;
3785
Venkat Duvvuruc1bb0a52016-03-02 06:00:28 -05003786 status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
Kalesh APbcc84142015-08-05 03:27:48 -04003787 if (status)
3788 return status;
3789
3790 /* For BE3 VFs, the PF programs the initial MAC address */
3791 if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
Suresh Reddy988d44b2016-09-07 19:57:52 +05303792 status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
Kalesh APbcc84142015-08-05 03:27:48 -04003793 if (status)
3794 return status;
Suresh Reddyc27ebf52016-09-07 19:57:53 +05303795 ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
Kalesh APbcc84142015-08-05 03:27:48 -04003796 }
3797
3798 if (adapter->vlans_added)
3799 be_vid_config(adapter);
3800
Sathya Perlab7172412016-07-27 05:26:18 -04003801 __be_set_rx_mode(adapter);
Kalesh APbcc84142015-08-05 03:27:48 -04003802
3803 return 0;
3804}
3805
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003806static int be_open(struct net_device *netdev)
3807{
3808 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003809 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07003810 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003811 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00003812 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07003813 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00003814
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003815 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00003816 if (status)
3817 goto err;
3818
Kalesh APbcc84142015-08-05 03:27:48 -04003819 status = be_enable_if_filters(adapter);
3820 if (status)
3821 goto err;
3822
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003823 status = be_irq_register(adapter);
3824 if (status)
3825 goto err;
Sathya Perla5fb379e2009-06-18 00:02:59 +00003826
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003827 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07003828 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00003829
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003830 for_all_tx_queues(adapter, txo, i)
3831 be_cq_notify(adapter, txo->cq.id, true, 0);
3832
Sathya Perla7a1e9b22010-02-17 01:35:11 +00003833 be_async_mcc_enable(adapter);
3834
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003835 for_all_evt_queues(adapter, eqo, i) {
3836 napi_enable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05303837 be_enable_busy_poll(eqo);
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003838 be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003839 }
Somnath Kotur04d3d622013-05-02 03:36:55 +00003840 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003841
Sathya Perla323ff712012-09-28 04:39:43 +00003842 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
Ajit Khapardeb236916a2011-12-30 12:15:40 +00003843 if (!status)
3844 be_link_status_update(adapter, link_status);
3845
Sathya Perlafba87552013-05-08 02:05:50 +00003846 netif_tx_start_all_queues(netdev);
Sathya Perlac9c47142014-03-27 10:46:19 +05303847 if (skyhawk_chip(adapter))
Alexander Duyckbde6b7c2016-06-16 12:21:43 -07003848 udp_tunnel_get_rx_info(netdev);
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303849
Sathya Perla889cd4b2010-05-30 23:33:45 +00003850 return 0;
3851err:
3852 be_close(adapter->netdev);
3853 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00003854}
3855
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003856static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3857{
3858 u32 addr;
3859
3860 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3861
3862 mac[5] = (u8)(addr & 0xFF);
3863 mac[4] = (u8)((addr >> 8) & 0xFF);
3864 mac[3] = (u8)((addr >> 16) & 0xFF);
3865 /* Use the OUI from the current MAC address */
3866 memcpy(mac, adapter->netdev->dev_addr, 3);
3867}
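/* Illustrative example (not part of the driver): if the PF MAC is
 * 00:90:fa:12:34:56 and jhash() returns 0xaabbccdd, then mac[3..5] become
 * bb:cc:dd and the memcpy() above keeps the 3-byte OUI, yielding the seed
 * MAC 00:90:fa:bb:cc:dd. be_vf_eth_addr_config() below then assigns seed,
 * seed+1, seed+2, ... to successive VFs via mac[5] += 1.
 */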
3868
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003869/*
3870 * Generate a seed MAC address from the PF MAC Address using jhash.
3871 * MAC addresses for VFs are assigned incrementally starting from the seed.
3872 * These addresses are programmed in the ASIC by the PF and the VF driver
3873 * queries for the MAC address during its probe.
3874 */
Sathya Perla4c876612013-02-03 20:30:11 +00003875static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003876{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003877 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07003878 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003879 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00003880 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003881
3882 be_vf_eth_addr_generate(adapter, mac);
3883
Sathya Perla11ac75e2011-12-13 00:58:50 +00003884 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303885 if (BEx_chip(adapter))
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003886 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00003887 vf_cfg->if_handle,
3888 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303889 else
3890 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3891 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003892
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003893 if (status)
3894 dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05303895 "Mac address assignment failed for VF %d\n",
3896 vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003897 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00003898 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003899
3900 mac[5] += 1;
3901 }
3902 return status;
3903}
3904
Sathya Perla4c876612013-02-03 20:30:11 +00003905static int be_vfs_mac_query(struct be_adapter *adapter)
3906{
3907 int status, vf;
3908 u8 mac[ETH_ALEN];
3909 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003910
3911 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303912 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3913 mac, vf_cfg->if_handle,
3914 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003915 if (status)
3916 return status;
3917 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3918 }
3919 return 0;
3920}
3921
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003922static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003923{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003924 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003925 u32 vf;
3926
Sathya Perla257a3fe2013-06-14 15:54:51 +05303927 if (pci_vfs_assigned(adapter->pdev)) {
Sathya Perla4c876612013-02-03 20:30:11 +00003928 dev_warn(&adapter->pdev->dev,
3929 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00003930 goto done;
3931 }
3932
Sathya Perlab4c1df92013-05-08 02:05:47 +00003933 pci_disable_sriov(adapter->pdev);
3934
Sathya Perla11ac75e2011-12-13 00:58:50 +00003935 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303936 if (BEx_chip(adapter))
Sathya Perla11ac75e2011-12-13 00:58:50 +00003937 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3938 vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303939 else
3940 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3941 vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003942
Sathya Perla11ac75e2011-12-13 00:58:50 +00003943 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3944 }
Somnath Kotur884476b2016-06-22 08:54:55 -04003945
3946 if (BE3_chip(adapter))
3947 be_cmd_set_hsw_config(adapter, 0, 0,
3948 adapter->if_handle,
3949 PORT_FWD_TYPE_PASSTHRU, 0);
Sathya Perla39f1d942012-05-08 19:41:24 +00003950done:
3951 kfree(adapter->vf_cfg);
3952 adapter->num_vfs = 0;
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05303953 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003954}
3955
Sathya Perla77071332013-08-27 16:57:34 +05303956static void be_clear_queues(struct be_adapter *adapter)
3957{
3958 be_mcc_queues_destroy(adapter);
3959 be_rx_cqs_destroy(adapter);
3960 be_tx_queues_destroy(adapter);
3961 be_evt_queues_destroy(adapter);
3962}
3963
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303964static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003965{
Sathya Perla191eb752012-02-23 18:50:13 +00003966 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3967 cancel_delayed_work_sync(&adapter->work);
3968 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3969 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303970}
3971
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003972static void be_cancel_err_detection(struct be_adapter *adapter)
3973{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05303974 struct be_error_recovery *err_rec = &adapter->error_recovery;
3975
3976 if (!be_err_recovery_workq)
3977 return;
3978
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003979 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05303980 cancel_delayed_work_sync(&err_rec->err_detection_work);
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003981 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3982 }
3983}
3984
Sathya Perlac9c47142014-03-27 10:46:19 +05303985static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3986{
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05003987 struct net_device *netdev = adapter->netdev;
3988
Sathya Perlac9c47142014-03-27 10:46:19 +05303989 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3990 be_cmd_manage_iface(adapter, adapter->if_handle,
3991 OP_CONVERT_TUNNEL_TO_NORMAL);
3992
3993 if (adapter->vxlan_port)
3994 be_cmd_set_vxlan_port(adapter, 0);
3995
3996 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3997 adapter->vxlan_port = 0;
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05003998
3999 netdev->hw_enc_features = 0;
4000 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
Sriharsha Basavapatnaac9a3d82014-12-19 10:00:18 +05304001 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
Sathya Perlac9c47142014-03-27 10:46:19 +05304002}
4003
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004004static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
4005 struct be_resources *vft_res)
Vasundhara Volamf2858732015-03-04 00:44:33 -05004006{
4007 struct be_resources res = adapter->pool_res;
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004008 u32 vf_if_cap_flags = res.vf_if_cap_flags;
4009 struct be_resources res_mod = {0};
Vasundhara Volamf2858732015-03-04 00:44:33 -05004010 u16 num_vf_qs = 1;
4011
Somnath Koturde2b1e02016-06-06 07:22:10 -04004012 /* Distribute the queue resources among the PF and its VFs */
4013 if (num_vfs) {
4014 /* Divide the rx queues evenly among the VFs and the PF, capped
4015 * at VF-EQ-count. Any remainder queues belong to the PF.
4016 */
Sriharsha Basavapatnaee9ad282016-02-03 09:49:19 +05304017 num_vf_qs = min(SH_VF_MAX_NIC_EQS,
4018 res.max_rss_qs / (num_vfs + 1));
Vasundhara Volamf2858732015-03-04 00:44:33 -05004019
Somnath Koturde2b1e02016-06-06 07:22:10 -04004020 /* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
4021 * RSS Tables per port. Provide RSS on VFs only if the number of
4022 * VFs requested is less than its PF Pool's RSS Tables limit.
Vasundhara Volamf2858732015-03-04 00:44:33 -05004023 */
Somnath Koturde2b1e02016-06-06 07:22:10 -04004024 if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
Vasundhara Volamf2858732015-03-04 00:44:33 -05004025 num_vf_qs = 1;
4026 }
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004027
4028 /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
4029 * which are modifiable using SET_PROFILE_CONFIG cmd.
4030 */
Somnath Koturde2b1e02016-06-06 07:22:10 -04004031 be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
4032 RESOURCE_MODIFIABLE, 0);
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004033
4034 /* If RSS IFACE capability flags are modifiable for a VF, set the
4035 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
4036 * more than 1 RSSQ is available for a VF.
4037 * Otherwise, provision only 1 queue pair for VF.
4038 */
4039 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
4040 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4041 if (num_vf_qs > 1) {
4042 vf_if_cap_flags |= BE_IF_FLAGS_RSS;
4043 if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
4044 vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
4045 } else {
4046 vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
4047 BE_IF_FLAGS_DEFQ_RSS);
4048 }
4049 } else {
4050 num_vf_qs = 1;
4051 }
4052
4053 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
4054 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4055 vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
4056 }
4057
4058 vft_res->vf_if_cap_flags = vf_if_cap_flags;
4059 vft_res->max_rx_qs = num_vf_qs;
4060 vft_res->max_rss_qs = num_vf_qs;
4061 vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
4062 vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);
4063
4064 /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
4065 * among the PF and its VFs, if the fields are changeable
4066 */
4067 if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
4068 vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);
4069
4070 if (res_mod.max_vlans == FIELD_MODIFIABLE)
4071 vft_res->max_vlans = res.max_vlans / (num_vfs + 1);
4072
4073 if (res_mod.max_iface_count == FIELD_MODIFIABLE)
4074 vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);
4075
4076 if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
4077 vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
Vasundhara Volamf2858732015-03-04 00:44:33 -05004078}
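/* Illustrative example (not part of the driver, assuming SH_VF_MAX_NIC_EQS
 * is 3): on Skyhawk with res.max_rss_qs = 16 and num_vfs = 4, num_vf_qs =
 * min(3, 16 / (4 + 1)) = 3, so each VF is provisioned 3 RX/RSS queues and
 * the PF keeps the remainder. TX queues, CQs, MACs, VLANs, IFACEs and MCC
 * queues are likewise divided by (num_vfs + 1) where FIELD_MODIFIABLE.
 */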
4079
Sathya Perlab7172412016-07-27 05:26:18 -04004080static void be_if_destroy(struct be_adapter *adapter)
4081{
4082 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
4083
4084 kfree(adapter->pmac_id);
4085 adapter->pmac_id = NULL;
4086
4087 kfree(adapter->mc_list);
4088 adapter->mc_list = NULL;
4089
4090 kfree(adapter->uc_list);
4091 adapter->uc_list = NULL;
4092}
4093
Somnath Koturb05004a2013-12-05 12:08:16 +05304094static int be_clear(struct be_adapter *adapter)
4095{
Vasundhara Volamf2858732015-03-04 00:44:33 -05004096 struct pci_dev *pdev = adapter->pdev;
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004097 struct be_resources vft_res = {0};
Vasundhara Volamf2858732015-03-04 00:44:33 -05004098
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304099 be_cancel_worker(adapter);
Sathya Perla191eb752012-02-23 18:50:13 +00004100
Sathya Perlab7172412016-07-27 05:26:18 -04004101 flush_workqueue(be_wq);
4102
Sathya Perla11ac75e2011-12-13 00:58:50 +00004103 if (sriov_enabled(adapter))
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004104 be_vf_clear(adapter);
4105
Vasundhara Volambec84e62014-06-30 13:01:32 +05304106 /* Re-configure FW to distribute resources evenly across max-supported
4107 * number of VFs, only when VFs are not already enabled.
4108 */
Vasundhara Volamace40af2015-03-04 00:44:34 -05004109 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
4110 !pci_vfs_assigned(pdev)) {
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004111 be_calculate_vf_res(adapter,
4112 pci_sriov_get_totalvfs(pdev),
4113 &vft_res);
Vasundhara Volambec84e62014-06-30 13:01:32 +05304114 be_cmd_set_sriov_config(adapter, adapter->pool_res,
Vasundhara Volamf2858732015-03-04 00:44:33 -05004115 pci_sriov_get_totalvfs(pdev),
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004116 &vft_res);
Vasundhara Volamf2858732015-03-04 00:44:33 -05004117 }
Vasundhara Volambec84e62014-06-30 13:01:32 +05304118
Sathya Perlac9c47142014-03-27 10:46:19 +05304119 be_disable_vxlan_offloads(adapter);
Ajit Khapardefbc13f02012-03-18 06:23:21 +00004120
Sathya Perlab7172412016-07-27 05:26:18 -04004121 be_if_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004122
Sathya Perla77071332013-08-27 16:57:34 +05304123 be_clear_queues(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004124
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004125 be_msix_disable(adapter);
Kalesh APe1ad8e32014-04-14 16:12:41 +05304126 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
Sathya Perlaa54769f2011-10-24 02:45:00 +00004127 return 0;
4128}
4129
Sathya Perla4c876612013-02-03 20:30:11 +00004130static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004131{
Sathya Perla92bf14a2013-08-27 16:57:32 +05304132 struct be_resources res = {0};
Kalesh APbcc84142015-08-05 03:27:48 -04004133 u32 cap_flags, en_flags, vf;
Sathya Perla4c876612013-02-03 20:30:11 +00004134 struct be_vf_cfg *vf_cfg;
Kalesh AP0700d812015-01-20 03:51:43 -05004135 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004136
Kalesh AP0700d812015-01-20 03:51:43 -05004137 /* If a FW profile exists, then cap_flags are updated */
Venkat Duvvuruc1bb0a52016-03-02 06:00:28 -05004138 cap_flags = BE_VF_IF_EN_FLAGS;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004139
Sathya Perla4c876612013-02-03 20:30:11 +00004140 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304141 if (!BE3_chip(adapter)) {
Somnath Koturde2b1e02016-06-06 07:22:10 -04004142 status = be_cmd_get_profile_config(adapter, &res, NULL,
4143 ACTIVE_PROFILE_TYPE,
Vasundhara Volamf2858732015-03-04 00:44:33 -05004144 RESOURCE_LIMITS,
Sathya Perla92bf14a2013-08-27 16:57:32 +05304145 vf + 1);
Vasundhara Volam435452a2015-03-20 06:28:23 -04004146 if (!status) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304147 cap_flags = res.if_cap_flags;
Vasundhara Volam435452a2015-03-20 06:28:23 -04004148 /* Prevent VFs from enabling VLAN promiscuous
4149 * mode
4150 */
4151 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
4152 }
Sathya Perla92bf14a2013-08-27 16:57:32 +05304153 }
Sathya Perla4c876612013-02-03 20:30:11 +00004154
Venkat Duvvuruc1bb0a52016-03-02 06:00:28 -05004155 /* PF should enable IF flags during proxy if_create call */
4156 en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
Kalesh APbcc84142015-08-05 03:27:48 -04004157 status = be_cmd_if_create(adapter, cap_flags, en_flags,
4158 &vf_cfg->if_handle, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00004159 if (status)
Kalesh AP0700d812015-01-20 03:51:43 -05004160 return status;
Sathya Perla4c876612013-02-03 20:30:11 +00004161 }
Kalesh AP0700d812015-01-20 03:51:43 -05004162
4163 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004164}
4165
Sathya Perla39f1d942012-05-08 19:41:24 +00004166static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00004167{
Sathya Perla11ac75e2011-12-13 00:58:50 +00004168 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00004169 int vf;
4170
Sathya Perla39f1d942012-05-08 19:41:24 +00004171 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
4172 GFP_KERNEL);
4173 if (!adapter->vf_cfg)
4174 return -ENOMEM;
4175
Sathya Perla11ac75e2011-12-13 00:58:50 +00004176 for_all_vfs(adapter, vf_cfg, vf) {
4177 vf_cfg->if_handle = -1;
4178 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004179 }
Sathya Perla39f1d942012-05-08 19:41:24 +00004180 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00004181}
4182
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004183static int be_vf_setup(struct be_adapter *adapter)
4184{
Sathya Perla4c876612013-02-03 20:30:11 +00004185 struct device *dev = &adapter->pdev->dev;
Somnath Koturc5022242014-03-03 14:24:20 +05304186 struct be_vf_cfg *vf_cfg;
4187 int status, old_vfs, vf;
Kalesh APe7bcbd72015-05-06 05:30:32 -04004188 bool spoofchk;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004189
Sathya Perla257a3fe2013-06-14 15:54:51 +05304190 old_vfs = pci_num_vf(adapter->pdev);
Sathya Perla39f1d942012-05-08 19:41:24 +00004191
4192 status = be_vf_setup_init(adapter);
4193 if (status)
4194 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00004195
Sathya Perla4c876612013-02-03 20:30:11 +00004196 if (old_vfs) {
4197 for_all_vfs(adapter, vf_cfg, vf) {
4198 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
4199 if (status)
4200 goto err;
4201 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004202
Sathya Perla4c876612013-02-03 20:30:11 +00004203 status = be_vfs_mac_query(adapter);
4204 if (status)
4205 goto err;
4206 } else {
Vasundhara Volambec84e62014-06-30 13:01:32 +05304207 status = be_vfs_if_create(adapter);
4208 if (status)
4209 goto err;
4210
Sathya Perla39f1d942012-05-08 19:41:24 +00004211 status = be_vf_eth_addr_config(adapter);
4212 if (status)
4213 goto err;
4214 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004215
Sathya Perla11ac75e2011-12-13 00:58:50 +00004216 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla04a06022013-07-23 15:25:00 +05304217 /* Allow VFs to program MAC/VLAN filters */
Vasundhara Volam435452a2015-03-20 06:28:23 -04004218 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
4219 vf + 1);
4220 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
Sathya Perla04a06022013-07-23 15:25:00 +05304221 status = be_cmd_set_fn_privileges(adapter,
Vasundhara Volam435452a2015-03-20 06:28:23 -04004222 vf_cfg->privileges |
Sathya Perla04a06022013-07-23 15:25:00 +05304223 BE_PRIV_FILTMGMT,
4224 vf + 1);
Vasundhara Volam435452a2015-03-20 06:28:23 -04004225 if (!status) {
4226 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
Sathya Perla04a06022013-07-23 15:25:00 +05304227 dev_info(dev, "VF%d has FILTMGMT privilege\n",
4228 vf);
Vasundhara Volam435452a2015-03-20 06:28:23 -04004229 }
Sathya Perla04a06022013-07-23 15:25:00 +05304230 }
4231
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05304232 /* Allow full available bandwidth */
4233 if (!old_vfs)
4234 be_cmd_config_qos(adapter, 0, 0, vf + 1);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00004235
Kalesh APe7bcbd72015-05-06 05:30:32 -04004236 status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
4237 vf_cfg->if_handle, NULL,
4238 &spoofchk);
4239 if (!status)
4240 vf_cfg->spoofchk = spoofchk;
4241
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304242 if (!old_vfs) {
Vasundhara Volam05998632013-10-01 15:59:59 +05304243 be_cmd_enable_vf(adapter, vf + 1);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304244 be_cmd_set_logical_link_config(adapter,
4245 IFLA_VF_LINK_STATE_AUTO,
4246 vf+1);
4247 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004248 }
Sathya Perlab4c1df92013-05-08 02:05:47 +00004249
4250 if (!old_vfs) {
4251 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
4252 if (status) {
4253 dev_err(dev, "SRIOV enable failed\n");
4254 adapter->num_vfs = 0;
4255 goto err;
4256 }
4257 }
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05304258
Somnath Kotur884476b2016-06-22 08:54:55 -04004259 if (BE3_chip(adapter)) {
4260 /* On BE3, enable VEB only when SRIOV is enabled */
4261 status = be_cmd_set_hsw_config(adapter, 0, 0,
4262 adapter->if_handle,
4263 PORT_FWD_TYPE_VEB, 0);
4264 if (status)
4265 goto err;
4266 }
4267
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05304268 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004269 return 0;
4270err:
Sathya Perla4c876612013-02-03 20:30:11 +00004271 dev_err(dev, "VF setup failed\n");
4272 be_vf_clear(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004273 return status;
4274}
4275
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304276/* Converting function_mode bits on BE3 to SH mc_type enums */
4277
4278static u8 be_convert_mc_type(u32 function_mode)
4279{
Suresh Reddy66064db2014-06-23 16:41:29 +05304280 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304281 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05304282 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304283 return FLEX10;
4284 else if (function_mode & VNIC_MODE)
4285 return vNIC2;
4286 else if (function_mode & UMC_ENABLED)
4287 return UMC;
4288 else
4289 return MC_NONE;
4290}
4291
Sathya Perla92bf14a2013-08-27 16:57:32 +05304292/* On BE2/BE3, the FW does not advertise the supported limits */
4293static void BEx_get_resources(struct be_adapter *adapter,
4294 struct be_resources *res)
4295{
Vasundhara Volambec84e62014-06-30 13:01:32 +05304296 bool use_sriov = adapter->num_vfs ? 1 : 0;
Sathya Perla92bf14a2013-08-27 16:57:32 +05304297
4298 if (be_physfn(adapter))
4299 res->max_uc_mac = BE_UC_PMAC_COUNT;
4300 else
4301 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
4302
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304303 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
4304
4305 if (be_is_mc(adapter)) {
4306 /* Assuming that there are 4 channels per port,
4307 * when multi-channel is enabled
4308 */
4309 if (be_is_qnq_mode(adapter))
4310 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
4311 else
4312 /* In a non-qnq multichannel mode, the pvid
4313 * takes up one vlan entry
4314 */
4315 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
4316 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304317 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304318 }
4319
Sathya Perla92bf14a2013-08-27 16:57:32 +05304320 res->max_mcast_mac = BE_MAX_MC;
4321
Vasundhara Volama5243da2014-03-11 18:53:07 +05304322 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
4323 * 2) Create multiple TX rings on a BE3-R multi-channel interface
4324 * *only* if it is RSS-capable.
4325 */
4326 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
Kalesh AP18c57c72015-05-06 05:30:38 -04004327 be_virtfn(adapter) ||
4328 (be_is_mc(adapter) &&
4329 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304330 res->max_tx_qs = 1;
Suresh Reddya28277d2014-09-02 09:56:57 +05304331 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
4332 struct be_resources super_nic_res = {0};
4333
4334 /* On a SuperNIC profile, the driver needs to use the
4335 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
4336 */
Somnath Koturde2b1e02016-06-06 07:22:10 -04004337 be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
4338 ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
4339 0);
Suresh Reddya28277d2014-09-02 09:56:57 +05304340 /* Some old versions of BE3 FW don't report max_tx_qs value */
4341 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
4342 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304343 res->max_tx_qs = BE3_MAX_TX_QS;
Suresh Reddya28277d2014-09-02 09:56:57 +05304344 }
Sathya Perla92bf14a2013-08-27 16:57:32 +05304345
4346 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
4347 !use_sriov && be_physfn(adapter))
4348 res->max_rss_qs = (adapter->be3_native) ?
4349 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
4350 res->max_rx_qs = res->max_rss_qs + 1;
4351
Suresh Reddye3dc8672014-01-06 13:02:25 +05304352 if (be_physfn(adapter))
Vasundhara Volamd3518e22014-07-17 16:20:29 +05304353 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
Suresh Reddye3dc8672014-01-06 13:02:25 +05304354 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
4355 else
4356 res->max_evt_qs = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05304357
4358 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05004359 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
Sathya Perla92bf14a2013-08-27 16:57:32 +05304360 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
4361 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
4362}
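/* Illustrative example (not part of the driver, assuming
 * BE_NUM_VLANS_SUPPORTED is 64): in QnQ multi-channel mode max_vlans =
 * 64 / 8 = 8; in non-QnQ multi-channel mode max_vlans = 64 / 4 - 1 = 15,
 * since the pvid consumes one entry; otherwise all 64 entries are usable.
 */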
4363
Sathya Perla30128032011-11-10 19:17:57 +00004364static void be_setup_init(struct be_adapter *adapter)
4365{
4366 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004367 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004368 adapter->if_handle = -1;
4369 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05004370 adapter->if_flags = 0;
Ajit Khaparde51d1f982016-02-10 22:45:54 +05304371 adapter->phy_state = BE_UNKNOWN_PHY_STATE;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004372 if (be_physfn(adapter))
4373 adapter->cmd_privileges = MAX_PRIVILEGES;
4374 else
4375 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00004376}
4377
Somnath Koturde2b1e02016-06-06 07:22:10 -04004378/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
4379 * However, this HW limitation is not exposed to the host via any SLI cmd.
4380 * As a result, in the case of SRIOV and in particular multi-partition configs
4381 * the driver needs to calculate a proportional share of RSS Tables per PF-pool
4382 * for distribution between the VFs. This self-imposed limit will determine the
4383 * number of VFs for which RSS can be enabled.
4384 */
Baoyou Xied766e7e2016-09-18 16:35:29 +08004385static void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
Somnath Koturde2b1e02016-06-06 07:22:10 -04004386{
4387 struct be_port_resources port_res = {0};
4388 u8 rss_tables_on_port;
4389 u16 max_vfs = be_max_vfs(adapter);
4390
4391 be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
4392 RESOURCE_LIMITS, 0);
4393
4394 rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;
4395
4396 /* Each PF Pool's RSS Tables limit =
4397 * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
4398 */
4399 adapter->pool_res.max_rss_tables =
4400 max_vfs * rss_tables_on_port / port_res.max_vfs;
4401}
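/* Illustrative example (not part of the driver, assuming
 * MAX_PORT_RSS_TABLES is 15): with 2 NIC PFs on the port,
 * rss_tables_on_port = 15 - 2 = 13. If this PF owns 32 of the port's 64
 * VFs, its pool limit is 32 * 13 / 64 = 6, i.e. RSS can be enabled for
 * at most 6 of this PF's VFs.
 */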
4402
Vasundhara Volambec84e62014-06-30 13:01:32 +05304403static int be_get_sriov_config(struct be_adapter *adapter)
4404{
Vasundhara Volambec84e62014-06-30 13:01:32 +05304405 struct be_resources res = {0};
Sathya Perlad3d18312014-08-01 17:47:30 +05304406 int max_vfs, old_vfs;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304407
Somnath Koturde2b1e02016-06-06 07:22:10 -04004408 be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
4409 RESOURCE_LIMITS, 0);
Sathya Perlad3d18312014-08-01 17:47:30 +05304410
Vasundhara Volamace40af2015-03-04 00:44:34 -05004411 /* Some old versions of BE3 FW don't report max_vfs value */
Vasundhara Volambec84e62014-06-30 13:01:32 +05304412 if (BE3_chip(adapter) && !res.max_vfs) {
4413 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
4414 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
4415 }
4416
Sathya Perlad3d18312014-08-01 17:47:30 +05304417 adapter->pool_res = res;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304418
Vasundhara Volamace40af2015-03-04 00:44:34 -05004419 /* If the VFs were not disabled during a previous unload of the driver,
4420 * we cannot rely on the PF POOL limits for the TotalVFs value.
4421 * Instead use the TotalVFs value stored in the pci-dev struct.
4422 */
Vasundhara Volambec84e62014-06-30 13:01:32 +05304423 old_vfs = pci_num_vf(adapter->pdev);
4424 if (old_vfs) {
Vasundhara Volamace40af2015-03-04 00:44:34 -05004425 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
4426 old_vfs);
4427
4428 adapter->pool_res.max_vfs =
4429 pci_sriov_get_totalvfs(adapter->pdev);
Vasundhara Volambec84e62014-06-30 13:01:32 +05304430 adapter->num_vfs = old_vfs;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304431 }
4432
Somnath Koturde2b1e02016-06-06 07:22:10 -04004433 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4434 be_calculate_pf_pool_rss_tables(adapter);
4435 dev_info(&adapter->pdev->dev,
4436 "RSS can be enabled for all VFs if num_vfs <= %d\n",
4437 be_max_pf_pool_rss_tables(adapter));
4438 }
Vasundhara Volambec84e62014-06-30 13:01:32 +05304439 return 0;
4440}
4441
Vasundhara Volamace40af2015-03-04 00:44:34 -05004442static void be_alloc_sriov_res(struct be_adapter *adapter)
4443{
4444 int old_vfs = pci_num_vf(adapter->pdev);
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004445 struct be_resources vft_res = {0};
Vasundhara Volamace40af2015-03-04 00:44:34 -05004446 int status;
4447
4448 be_get_sriov_config(adapter);
4449
4450 if (!old_vfs)
4451 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
4452
4453 /* When the HW is in SRIOV capable configuration, the PF-pool
4454 * resources are given to the PF during driver load, if there are no
4455 * old VFs. This facility is not available in BE3 FW.
4456 * Also, this is done by FW in Lancer chip.
4457 */
4458 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004459 be_calculate_vf_res(adapter, 0, &vft_res);
Vasundhara Volamace40af2015-03-04 00:44:34 -05004460 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004461 &vft_res);
Vasundhara Volamace40af2015-03-04 00:44:34 -05004462 if (status)
4463 dev_err(&adapter->pdev->dev,
4464 "Failed to optimize SRIOV resources\n");
4465 }
4466}
4467
Sathya Perla92bf14a2013-08-27 16:57:32 +05304468static int be_get_resources(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004469{
Sathya Perla92bf14a2013-08-27 16:57:32 +05304470 struct device *dev = &adapter->pdev->dev;
4471 struct be_resources res = {0};
4472 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004473
Sathya Perla92bf14a2013-08-27 16:57:32 +05304474 /* For Lancer, SH etc read per-function resource limits from FW.
4475 * GET_FUNC_CONFIG returns per function guaranteed limits.
4476 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits
4477 */
Sathya Perlace7faf02016-06-22 08:54:53 -04004478 if (BEx_chip(adapter)) {
4479 BEx_get_resources(adapter, &res);
4480 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304481 status = be_cmd_get_func_config(adapter, &res);
4482 if (status)
4483 return status;
4484
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05004485 /* If a default RXQ must be created, we'll use up one RSSQ */
4486 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
4487 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
4488 res.max_rss_qs -= 1;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004489 }
4490
Sathya Perlace7faf02016-06-22 08:54:53 -04004491 /* If RoCE is supported, stash away half the EQs for RoCE */
4492 res.max_nic_evt_qs = be_roce_supported(adapter) ?
4493 res.max_evt_qs / 2 : res.max_evt_qs;
4494 adapter->res = res;
4495
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05004496 /* If FW supports RSS default queue, then skip creating non-RSS
4497 * queue for non-IP traffic.
4498 */
4499 adapter->need_def_rxq = (be_if_cap_flags(adapter) &
4500 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
4501
Sathya Perlaacbafeb2014-09-02 09:56:46 +05304502 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
4503 be_max_txqs(adapter), be_max_rxqs(adapter),
Sathya Perlace7faf02016-06-22 08:54:53 -04004504 be_max_rss(adapter), be_max_nic_eqs(adapter),
Sathya Perlaacbafeb2014-09-02 09:56:46 +05304505 be_max_vfs(adapter));
4506 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
4507 be_max_uc(adapter), be_max_mc(adapter),
4508 be_max_vlans(adapter));
4509
Sathya Perlae2617682016-06-22 08:54:54 -04004510 /* Ensure RX and TX queues are created in pairs at init time */
4511 adapter->cfg_num_rx_irqs =
4512 min_t(u16, netif_get_num_default_rss_queues(),
4513 be_max_qp_irqs(adapter));
4514 adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
Sathya Perla92bf14a2013-08-27 16:57:32 +05304515 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004516}
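/* Illustrative example (not part of the driver): if
 * netif_get_num_default_rss_queues() returns 8 (its usual cap) and the
 * function supports more than 8 queue-pair IRQs, the driver starts with
 * 8 RX IRQs and, per the pairing rule above, 8 TX IRQs as well.
 */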
4517
Sathya Perla39f1d942012-05-08 19:41:24 +00004518static int be_get_config(struct be_adapter *adapter)
4519{
Sathya Perla6b085ba2015-02-23 04:20:09 -05004520 int status, level;
Vasundhara Volam542963b2014-01-15 13:23:33 +05304521 u16 profile_id;
Sathya Perla6b085ba2015-02-23 04:20:09 -05004522
Suresh Reddy980df242015-12-30 01:29:03 -05004523 status = be_cmd_get_cntl_attributes(adapter);
4524 if (status)
4525 return status;
4526
Kalesh APe97e3cd2014-07-17 16:20:26 +05304527 status = be_cmd_query_fw_cfg(adapter);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004528 if (status)
Sathya Perla92bf14a2013-08-27 16:57:32 +05304529 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004530
Venkat Duvvurufd7ff6f2015-12-30 01:29:04 -05004531 if (!lancer_chip(adapter) && be_physfn(adapter))
4532 be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);
4533
Sathya Perla6b085ba2015-02-23 04:20:09 -05004534 if (BEx_chip(adapter)) {
4535 level = be_cmd_get_fw_log_level(adapter);
4536 adapter->msg_enable =
4537 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4538 }
4539
4540 be_cmd_get_acpi_wol_cap(adapter);
Sriharsha Basavapatna45f13df2016-06-06 07:22:09 -04004541 pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
4542 pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);
Sathya Perla6b085ba2015-02-23 04:20:09 -05004543
Vasundhara Volam21252372015-02-06 08:18:42 -05004544 be_cmd_query_port_name(adapter);
4545
4546 if (be_physfn(adapter)) {
Vasundhara Volam542963b2014-01-15 13:23:33 +05304547 status = be_cmd_get_active_profile(adapter, &profile_id);
4548 if (!status)
4549 dev_info(&adapter->pdev->dev,
4550 "Using profile 0x%x\n", profile_id);
Vasundhara Volam962bcb72014-07-17 16:20:30 +05304551 }
Vasundhara Volambec84e62014-06-30 13:01:32 +05304552
Sathya Perla92bf14a2013-08-27 16:57:32 +05304553 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00004554}
4555
Sathya Perla95046b92013-07-23 15:25:02 +05304556static int be_mac_setup(struct be_adapter *adapter)
4557{
4558 u8 mac[ETH_ALEN];
4559 int status;
4560
4561 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4562 status = be_cmd_get_perm_mac(adapter, mac);
4563 if (status)
4564 return status;
4565
4566 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4567 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Sathya Perla95046b92013-07-23 15:25:02 +05304568 }
4569
Sathya Perla95046b92013-07-23 15:25:02 +05304570 return 0;
4571}
4572
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304573static void be_schedule_worker(struct be_adapter *adapter)
4574{
Sathya Perlab7172412016-07-27 05:26:18 -04004575 queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304576 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4577}
4578
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304579static void be_destroy_err_recovery_workq(void)
4580{
4581 if (!be_err_recovery_workq)
4582 return;
4583
4584 flush_workqueue(be_err_recovery_workq);
4585 destroy_workqueue(be_err_recovery_workq);
4586 be_err_recovery_workq = NULL;
4587}
4588
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05304589static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
Sathya Perlaeb7dd462015-02-23 04:20:11 -05004590{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304591 struct be_error_recovery *err_rec = &adapter->error_recovery;
4592
4593 if (!be_err_recovery_workq)
4594 return;
4595
4596 queue_delayed_work(be_err_recovery_workq, &err_rec->err_detection_work,
4597 msecs_to_jiffies(delay));
Sathya Perlaeb7dd462015-02-23 04:20:11 -05004598 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4599}
4600
Sathya Perla77071332013-08-27 16:57:34 +05304601static int be_setup_queues(struct be_adapter *adapter)
4602{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304603 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05304604 int status;
4605
4606 status = be_evt_queues_create(adapter);
4607 if (status)
4608 goto err;
4609
4610 status = be_tx_qs_create(adapter);
4611 if (status)
4612 goto err;
4613
4614 status = be_rx_cqs_create(adapter);
4615 if (status)
4616 goto err;
4617
4618 status = be_mcc_queues_create(adapter);
4619 if (status)
4620 goto err;
4621
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304622 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4623 if (status)
4624 goto err;
4625
4626 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4627 if (status)
4628 goto err;
4629
Sathya Perla77071332013-08-27 16:57:34 +05304630 return 0;
4631err:
4632 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4633 return status;
4634}
4635
Ajit Khaparde62219062016-02-10 22:45:53 +05304636static int be_if_create(struct be_adapter *adapter)
4637{
4638 u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4639 u32 cap_flags = be_if_cap_flags(adapter);
4640 int status;
4641
Sathya Perlab7172412016-07-27 05:26:18 -04004642 /* alloc required memory for other filtering fields */
4643 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4644 sizeof(*adapter->pmac_id), GFP_KERNEL);
4645 if (!adapter->pmac_id)
4646 return -ENOMEM;
4647
4648 adapter->mc_list = kcalloc(be_max_mc(adapter),
4649 sizeof(*adapter->mc_list), GFP_KERNEL);
4650 if (!adapter->mc_list)
4651 return -ENOMEM;
4652
4653 adapter->uc_list = kcalloc(be_max_uc(adapter),
4654 sizeof(*adapter->uc_list), GFP_KERNEL);
4655 if (!adapter->uc_list)
4656 return -ENOMEM;
4657
Sathya Perlae2617682016-06-22 08:54:54 -04004658 if (adapter->cfg_num_rx_irqs == 1)
Ajit Khaparde62219062016-02-10 22:45:53 +05304659 cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
4660
4661 en_flags &= cap_flags;
4662 /* will enable all the needed filter flags in be_open() */
4663 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4664 &adapter->if_handle, 0);
4665
Sathya Perlab7172412016-07-27 05:26:18 -04004666 if (status)
4667 return status;
4668
4669 return 0;
Ajit Khaparde62219062016-02-10 22:45:53 +05304670}
4671
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304672int be_update_queues(struct be_adapter *adapter)
4673{
4674 struct net_device *netdev = adapter->netdev;
4675 int status;
4676
4677 if (netif_running(netdev))
4678 be_close(netdev);
4679
4680 be_cancel_worker(adapter);
4681
4682 /* If any vectors have been shared with RoCE, we cannot re-program
4683 * the MSIx table.
4684 */
4685 if (!adapter->num_msix_roce_vec)
4686 be_msix_disable(adapter);
4687
4688 be_clear_queues(adapter);
Ajit Khaparde62219062016-02-10 22:45:53 +05304689 status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
4690 if (status)
4691 return status;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304692
4693 if (!msix_enabled(adapter)) {
4694 status = be_msix_enable(adapter);
4695 if (status)
4696 return status;
4697 }
4698
Ajit Khaparde62219062016-02-10 22:45:53 +05304699 status = be_if_create(adapter);
4700 if (status)
4701 return status;
4702
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304703 status = be_setup_queues(adapter);
4704 if (status)
4705 return status;
4706
4707 be_schedule_worker(adapter);
4708
4709 if (netif_running(netdev))
4710 status = be_open(netdev);
4711
4712 return status;
4713}
4714
Sathya Perlaf7062ee2015-02-06 08:18:35 -05004715static inline int fw_major_num(const char *fw_ver)
4716{
4717 int fw_major = 0, i;
4718
4719 i = sscanf(fw_ver, "%d.", &fw_major);
4720 if (i != 1)
4721 return 0;
4722
4723 return fw_major;
4724}
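/* Illustrative example (not part of the driver): fw_major_num("11.1.38.62")
 * returns 11. If the version string does not begin with a number, sscanf()
 * converts nothing and 0 is returned, which the caller in be_setup() treats
 * as an old/unknown firmware.
 */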
4725
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304726/* During error recovery, always FLR the PF.
4727 * Otherwise, FLR the PF only if no VFs are already enabled.
4728 */
Sathya Perlaf962f842015-02-23 04:20:16 -05004729static bool be_reset_required(struct be_adapter *adapter)
4730{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304731 if (be_error_recovering(adapter))
4732 return true;
4733 else
4734 return pci_num_vf(adapter->pdev) == 0;
Sathya Perlaf962f842015-02-23 04:20:16 -05004735}
4736
4737/* Wait for the FW to be ready and perform the required initialization */
4738static int be_func_init(struct be_adapter *adapter)
4739{
4740 int status;
4741
4742 status = be_fw_wait_ready(adapter);
4743 if (status)
4744 return status;
4745
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304746 /* FW is now ready; clear errors to allow cmds/doorbell */
4747 be_clear_error(adapter, BE_CLEAR_ALL);
4748
Sathya Perlaf962f842015-02-23 04:20:16 -05004749 if (be_reset_required(adapter)) {
4750 status = be_cmd_reset_function(adapter);
4751 if (status)
4752 return status;
4753
4754 /* Wait for interrupts to quiesce after an FLR */
4755 msleep(100);
Sathya Perlaf962f842015-02-23 04:20:16 -05004756 }
4757
4758 /* Tell FW we're ready to fire cmds */
4759 status = be_cmd_fw_init(adapter);
4760 if (status)
4761 return status;
4762
4763 /* Allow interrupts for other ULPs running on NIC function */
4764 be_intr_set(adapter, true);
4765
4766 return 0;
4767}
4768
Sathya Perla5fb379e2009-06-18 00:02:59 +00004769static int be_setup(struct be_adapter *adapter)
4770{
Sathya Perla39f1d942012-05-08 19:41:24 +00004771 struct device *dev = &adapter->pdev->dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004772 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004773
Sathya Perlaf962f842015-02-23 04:20:16 -05004774 status = be_func_init(adapter);
4775 if (status)
4776 return status;
4777
Sathya Perla30128032011-11-10 19:17:57 +00004778 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004779
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004780 if (!lancer_chip(adapter))
4781 be_cmd_req_native_mode(adapter);
Sathya Perla39f1d942012-05-08 19:41:24 +00004782
Suresh Reddy980df242015-12-30 01:29:03 -05004783 /* invoke this cmd first to get pf_num and vf_num, which are needed
4784 * for issuing profile-related cmds
4785 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, NULL);
		if (status)
			return status;
	}

	status = be_get_config(adapter);
	if (status)
		goto err;

	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_alloc_sriov_res(adapter);

	status = be_get_resources(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* will enable all the needed filter flags in be_open() */
	status = be_if_create(adapter);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old (%s), IRQs may not work\n",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	/* BE3 EVB echoes broadcast/multicast packets back to the PF's vport,
	 * confusing any Linux bridge or OVS it might be connected to.
	 * Set the EVB to PASSTHRU mode, which effectively disables the EVB
	 * when SR-IOV is not enabled.
	 */
	if (BE3_chip(adapter))
		be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
				      PORT_FWD_TYPE_PASSTHRU, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	if (be_physfn(adapter) && !lancer_chip(adapter))
		be_cmd_set_features(adapter);

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -ENETDOWN;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter);

fw_exit:
	release_firmware(fw);
	return status;
}

static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
			return -EOPNOTSUPP;

		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}

static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask,
				 int nlflags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		/* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
		if (!pci_sriov_get_totalvfs(adapter->pdev))
			return 0;
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode,
					       NULL);
		if (status)
			return 0;

		if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0, nlflags, filter_mask, NULL);
}

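/* Helper to allocate a deferred-command work item. GFP_ATOMIC is used here
 * because callers such as ndo_set_rx_mode() and the UDP tunnel hooks may run
 * in atomic context; the work itself executes later on the be_wq workqueue.
 */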
static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter,
					 void (*func)(struct work_struct *))
{
	struct be_cmd_work *work;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		dev_err(&adapter->pdev->dev,
			"be_work memory allocation failed\n");
		return NULL;
	}

	INIT_WORK(&work->work, func);
	work->adapter = adapter;
	return work;
}

/* VxLAN offload Notes:
 *
 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
 * is expected to work across all types of IP tunnels once exported. Skyhawk
 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
 * those other tunnels are unexported on the fly through ndo_features_check().
 *
 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
 * adds more than one port, disable offloads and don't re-enable them again
 * until after all the tunnels are removed.
 */
static void be_work_add_vxlan_port(struct work_struct *work)
{
	struct be_cmd_work *cmd_work =
				container_of(work, struct be_cmd_work, work);
	struct be_adapter *adapter = cmd_work->adapter;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->pdev->dev;
	__be16 port = cmd_work->info.vxlan_port;
	int status;

	if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
		adapter->vxlan_port_aliases++;
		goto done;
	}

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	if (adapter->vxlan_port_count++ >= 1)
		goto done;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	goto done;
err:
	be_disable_vxlan_offloads(adapter);
done:
	kfree(cmd_work);
}

static void be_work_del_vxlan_port(struct work_struct *work)
{
	struct be_cmd_work *cmd_work =
				container_of(work, struct be_cmd_work, work);
	struct be_adapter *adapter = cmd_work->adapter;
	__be16 port = cmd_work->info.vxlan_port;

	if (adapter->vxlan_port != port)
		goto done;

	if (adapter->vxlan_port_aliases) {
		adapter->vxlan_port_aliases--;
		goto out;
	}

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
done:
	adapter->vxlan_port_count--;
out:
	kfree(cmd_work);
}

static void be_cfg_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti,
			      void (*func)(struct work_struct *))
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_cmd_work *cmd_work;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
		return;

	cmd_work = be_alloc_work(adapter, func);
	if (cmd_work) {
		cmd_work->info.vxlan_port = ti->port;
		queue_work(be_wq, &cmd_work->work);
	}
}

static void be_del_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti)
{
	be_cfg_vxlan_port(netdev, ti, be_work_del_vxlan_port);
}

static void be_add_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti)
{
	be_cfg_vxlan_port(netdev, ti, be_work_add_vxlan_port);
}

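/* For reference, the frame layout assumed by the VxLAN check in
 * be_features_check() below is:
 *
 *   outer Ethernet | outer IP | UDP | VxLAN (8 bytes) | inner Ethernet (TEB)
 *
 * i.e. the gap between the transport header (UDP) and the inner MAC header
 * must be exactly sizeof(udphdr) + sizeof(vxlanhdr) for the packet to be
 * treated as VxLAN and keep its checksum/GSO offload flags.
 */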
static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled and
	 * Q-in-Q packets.
	 * Offload features for normal (non-tunnel) packets are unchanged.
	 */
	features = vlan_features_check(skb, features);
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic like GRE to work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features;
	}

	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	    sizeof(struct udphdr) + sizeof(struct vxlanhdr))
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}

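/* Builds a stable physical port id: byte 0 is the 1-based HBA port number,
 * followed by the controller serial-number words copied in reverse order.
 * The total length must fit in MAX_PHYS_ITEM_ID_LEN.
 */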
static int be_get_phys_port_id(struct net_device *dev,
			       struct netdev_phys_item_id *ppid)
{
	int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
	struct be_adapter *adapter = netdev_priv(dev);
	u8 *id;

	if (MAX_PHYS_ITEM_ID_LEN < id_len)
		return -ENOSPC;

	ppid->id[0] = adapter->hba_port_num + 1;
	id = &ppid->id[1];
	for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
	     i--, id += CNTL_SERIAL_NUM_WORD_SZ)
		memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);

	ppid->id_len = id_len;

	return 0;
}

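/* ndo_set_rx_mode may be invoked in atomic context (with the netdev address
 * lock held), while programming RX filters requires sleeping FW cmds; so the
 * actual filter update is deferred to be_work_set_rx_mode() on be_wq.
 */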
static void be_set_rx_mode(struct net_device *dev)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct be_cmd_work *work;

	work = be_alloc_work(adapter, be_work_set_rx_mode);
	if (work)
		queue_work(be_wq, &work->work);
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state	= be_set_vf_link_state,
	.ndo_set_vf_spoofchk	= be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
	.ndo_udp_tunnel_add	= be_add_vxlan_port,
	.ndo_udp_tunnel_del	= be_del_vxlan_port,
	.ndo_features_check	= be_features_check,
	.ndo_get_phys_port_id	= be_get_phys_port_id,
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}

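/* be_cleanup()/be_resume() form the quiesce/reattach pair used by suspend,
 * EEH and the error-recovery paths: detach the netdev and tear down
 * queues/interface, then rebuild everything and reattach.
 */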
static void be_cleanup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_close(netdev);
	rtnl_unlock();

	be_clear(adapter);
}

static int be_resume(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_setup(adapter);
	if (status)
		return status;

	rtnl_lock();
	if (netif_running(netdev))
		status = be_open(netdev);
	rtnl_unlock();

	if (status)
		return status;

	netif_device_attach(netdev);

	return 0;
}

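/* Trigger a chip-wide soft reset by setting the SR bit in the SLIPORT
 * soft-reset register. In the TPE recovery flow only PF0 initiates this.
 */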
static void be_soft_reset(struct be_adapter *adapter)
{
	u32 val;

	dev_info(&adapter->pdev->dev, "Initiating chip soft reset\n");
	val = ioread32(adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
	val |= SLIPORT_SOFTRESET_SR_MASK;
	iowrite32(val, adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
}

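/* Recovery is attempted only when all of the following hold:
 *  - the POST stage reports a recoverable error with a non-zero UE code
 *  - more than ERR_RECOVERY_IDLE_TIME has passed since driver load
 *  - more than ERR_RECOVERY_INTERVAL has passed since the last recovery
 *  - the UE code differs from the previous one (no back-to-back TPE errors)
 */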
static bool be_err_is_recoverable(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	unsigned long initial_idle_time =
		msecs_to_jiffies(ERR_RECOVERY_IDLE_TIME);
	unsigned long recovery_interval =
		msecs_to_jiffies(ERR_RECOVERY_INTERVAL);
	u16 ue_err_code;
	u32 val;

	val = be_POST_stage_get(adapter);
	if ((val & POST_STAGE_RECOVERABLE_ERR) != POST_STAGE_RECOVERABLE_ERR)
		return false;
	ue_err_code = val & POST_ERR_RECOVERY_CODE_MASK;
	if (ue_err_code == 0)
		return false;

	dev_err(&adapter->pdev->dev, "Recoverable HW error code: 0x%x\n",
		ue_err_code);

	if (jiffies - err_rec->probe_time <= initial_idle_time) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from driver load\n",
			jiffies_to_msecs(initial_idle_time) / MSEC_PER_SEC);
		return false;
	}

	if (err_rec->last_recovery_time &&
	    (jiffies - err_rec->last_recovery_time <= recovery_interval)) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from last recovery\n",
			jiffies_to_msecs(recovery_interval) / MSEC_PER_SEC);
		return false;
	}

	if (ue_err_code == err_rec->last_err_code) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover from a consecutive TPE error\n");
		return false;
	}

	err_rec->last_recovery_time = jiffies;
	err_rec->last_err_code = ue_err_code;
	return true;
}

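/* TPE recovery state machine, driven by repeated invocations from the error
 * detection task (resched_delay picks the time of the next invocation):
 *
 *   NONE -> DETECT ----(PF0)----> RESET -> PRE_POLL -> REINIT (done)
 *              \----(other PFs)-------------^
 *
 * Returns -EAGAIN while further steps remain, 0 once the adapter may be
 * re-initialized, and a different negative errno if recovery is impossible.
 */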
static int be_tpe_recover(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	int status = -EAGAIN;
	u32 val;

	switch (err_rec->recovery_state) {
	case ERR_RECOVERY_ST_NONE:
		err_rec->recovery_state = ERR_RECOVERY_ST_DETECT;
		err_rec->resched_delay = ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_DETECT:
		val = be_POST_stage_get(adapter);
		if ((val & POST_STAGE_RECOVERABLE_ERR) !=
		    POST_STAGE_RECOVERABLE_ERR) {
			dev_err(&adapter->pdev->dev,
				"Unrecoverable HW error detected: 0x%x\n", val);
			status = -EINVAL;
			err_rec->resched_delay = 0;
			break;
		}

		dev_err(&adapter->pdev->dev, "Recoverable HW error detected\n");

		/* Only PF0 initiates Chip Soft Reset. But PF0 must wait UE2SR
		 * milliseconds before it checks for final error status in
		 * SLIPORT_SEMAPHORE to determine if recovery criteria are met.
		 * If they are, PF0 initiates a Soft Reset.
		 */
		if (adapter->pf_num == 0) {
			err_rec->recovery_state = ERR_RECOVERY_ST_RESET;
			err_rec->resched_delay = err_rec->ue_to_reset_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
			break;
		}

		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_RESET:
		if (!be_err_is_recoverable(adapter)) {
			dev_err(&adapter->pdev->dev,
				"Failed to meet recovery criteria\n");
			status = -EIO;
			err_rec->resched_delay = 0;
			break;
		}
		be_soft_reset(adapter);
		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					err_rec->ue_to_reset_time;
		break;

	case ERR_RECOVERY_ST_PRE_POLL:
		err_rec->recovery_state = ERR_RECOVERY_ST_REINIT;
		err_rec->resched_delay = 0;
		status = 0;			/* done */
		break;

	default:
		status = -EINVAL;
		err_rec->resched_delay = 0;
		break;
	}

	return status;
}

static int be_err_recover(struct be_adapter *adapter)
{
	int status;

	if (!lancer_chip(adapter)) {
		if (!adapter->error_recovery.recovery_supported ||
		    adapter->priv_flags & BE_DISABLE_TPE_RECOVERY)
			return -EIO;
		status = be_tpe_recover(adapter);
		if (status)
			goto err;
	}

	/* Wait for adapter to reach quiescent state before
	 * destroying queues
	 */
	status = be_fw_wait_ready(adapter);
	if (status)
		goto err;

	adapter->flags |= BE_FLAGS_TRY_RECOVERY;

	be_cleanup(adapter);

	status = be_resume(adapter);
	if (status)
		goto err;

	adapter->flags &= ~BE_FLAGS_TRY_RECOVERY;

err:
	return status;
}

static void be_err_detection_task(struct work_struct *work)
{
	struct be_error_recovery *err_rec =
			container_of(work, struct be_error_recovery,
				     err_detection_work.work);
	struct be_adapter *adapter =
			container_of(err_rec, struct be_adapter,
				     error_recovery);
	u32 resched_delay = ERR_RECOVERY_DETECTION_DELAY;
	struct device *dev = &adapter->pdev->dev;
	int recovery_status;

	be_detect_error(adapter);
	if (!be_check_error(adapter, BE_ERROR_HW))
		goto reschedule_task;

	recovery_status = be_err_recover(adapter);
	if (!recovery_status) {
		err_rec->recovery_retries = 0;
		err_rec->recovery_state = ERR_RECOVERY_ST_NONE;
		dev_info(dev, "Adapter recovery successful\n");
		goto reschedule_task;
	} else if (!lancer_chip(adapter) && err_rec->resched_delay) {
		/* BEx/SH recovery state machine */
		if (adapter->pf_num == 0 &&
		    err_rec->recovery_state > ERR_RECOVERY_ST_DETECT)
			dev_err(&adapter->pdev->dev,
				"Adapter recovery in progress\n");
		resched_delay = err_rec->resched_delay;
		goto reschedule_task;
	} else if (lancer_chip(adapter) && be_virtfn(adapter)) {
		/* For VFs, check every second whether the PF has
		 * allocated resources
		 */
		dev_err(dev, "Re-trying adapter recovery\n");
		goto reschedule_task;
	} else if (lancer_chip(adapter) && err_rec->recovery_retries++ <
		   ERR_RECOVERY_MAX_RETRY_COUNT) {
		/* In case of another error during recovery, it takes 30 sec
		 * for adapter to come out of error. Retry error recovery after
		 * this time interval.
		 */
		dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
		resched_delay = ERR_RECOVERY_RETRY_DELAY;
		goto reschedule_task;
	} else {
		dev_err(dev, "Adapter recovery failed\n");
		dev_err(dev, "Please reboot server to recover\n");
	}

	return;

reschedule_task:
	be_schedule_err_detection(adapter, resched_delay);
}

static void be_log_sfp_info(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_sfp_info(adapter);
	if (!status) {
		dev_err(&adapter->pdev->dev,
			"Port %c: %s Vendor: %s part no: %s",
			adapter->port_name,
			be_misconfig_evt_port_state[adapter->phy_state],
			adapter->phy.vendor_name,
			adapter->phy.vendor_pn);
	}
	adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
}

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
	if (adapter->pcicfg && adapter->pcicfg_mapped)
		pci_iounmap(adapter->pdev, adapter->pcicfg);
}

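/* Doorbell BAR selection: on Lancer and on VFs the doorbell registers live
 * in BAR 0; on BE2/BE3/Skyhawk PFs they are in BAR 4.
 */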
static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || be_virtfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
			adapter->pcicfg_mapped = true;
		} else {
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
			adapter->pcicfg_mapped = false;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_drv_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
	struct device *dev = &adapter->pdev->dev;

	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->stats_cmd;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);
}

/* Allocate and initialize various fields in be_adapter struct */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	mutex_init(&adapter->mcc_lock);
	mutex_init(&adapter->rx_filter_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	adapter->error_recovery.recovery_state = ERR_RECOVERY_ST_NONE;
	adapter->error_recovery.resched_delay = 0;
	INIT_DELAYED_WORK(&adapter->error_recovery.err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	if (!pci_vfs_assigned(adapter->pdev))
		be_cmd_reset_function(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static ssize_t be_hwmon_show_temp(struct device *dev,
				  struct device_attribute *dev_attr,
				  char *buf)
{
	struct be_adapter *adapter = dev_get_drvdata(dev);

	/* Unit: millidegree Celsius */
	if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
		return -EIO;
	else
		return sprintf(buf, "%u\n",
			       adapter->hwmon_info.be_on_die_temp * 1000);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

ATTRIBUTE_GROUPS(be_hwmon);

static char *mc_name(struct be_adapter *adapter)
{
	char *str = "";	/* default */

	switch (adapter->mc_type) {
	case UMC:
		str = "UMC";
		break;
	case FLEX10:
		str = "FLEX10";
		break;
	case vNIC1:
		str = "vNIC-1";
		break;
	case nPAR:
		str = "nPAR";
		break;
	case UFP:
		str = "UFP";
		break;
	case vNIC2:
		str = "vNIC-2";
		break;
	default:
		str = "";
	}

	return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}

static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	adapter->error_recovery.probe_time = jiffies;

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	be_roce_dev_remove(adapter);

	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct be_resources vft_res = {0};
	int status;

	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool resources
	 * are equally distributed across the max-number of VFs. The user may
	 * request only a subset of the max-vfs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so that
	 * each VF will have access to more number of resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		be_calculate_vf_res(adapter, adapter->num_vfs,
				    &vft_res);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, &vft_res);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	if (!status)
		return adapter->num_vfs;

	return 0;
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	int status;

	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
		       " : Module param rx_frag_size must be 2048/4096/8192."
		       " Using 2048\n");
		rx_frag_size = 2048;
	}

	if (num_vfs > 0) {
		pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
		pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
	}

	be_wq = create_singlethread_workqueue("be_wq");
	if (!be_wq) {
		pr_warn(DRV_NAME " : workqueue creation failed\n");
		return -1;
	}

	be_err_recovery_workq =
		create_singlethread_workqueue("be_err_recover");
	if (!be_err_recovery_workq)
		pr_warn(DRV_NAME " : Could not create error recovery workqueue\n");

	status = pci_register_driver(&be_driver);
	if (status) {
		destroy_workqueue(be_wq);
		be_destroy_err_recovery_workq();
	}
	return status;
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);

	be_destroy_err_recovery_workq();

	if (be_wq)
		destroy_workqueue(be_wq);
}
module_exit(be_exit_module);