Victor Hsu | e0cd0e7 | 2021-06-08 11:05:03 +0800 | [diff] [blame] | 1 | /* |
| 2 | * DHD Linux header file - contains private structure definition of the Linux specific layer |
| 3 | * |
| 4 | * Copyright (C) 2021, Broadcom. |
| 5 | * |
| 6 | * Unless you and Broadcom execute a separate written software license |
| 7 | * agreement governing use of this software, this software is licensed to you |
| 8 | * under the terms of the GNU General Public License version 2 (the "GPL"), |
| 9 | * available at http://www.broadcom.com/licenses/GPLv2.php, with the |
| 10 | * following added to such license: |
| 11 | * |
| 12 | * As a special exception, the copyright holders of this software give you |
| 13 | * permission to link this software with independent modules, and to copy and |
| 14 | * distribute the resulting executable under terms of your choice, provided that |
| 15 | * you also meet, for each linked independent module, the terms and conditions of |
| 16 | * the license of that module. An independent module is a module which is not |
| 17 | * derived from this software. The special exception does not apply to any |
| 18 | * modifications of the software. |
| 19 | * |
| 20 | * |
| 21 | * <<Broadcom-WL-IPTag/Open:>> |
| 22 | * |
| 23 | * $Id$ |
| 24 | */ |
| 25 | |
| 26 | #ifndef __DHD_LINUX_PRIV_H__ |
| 27 | #define __DHD_LINUX_PRIV_H__ |
| 28 | |
| 29 | #include <osl.h> |
| 30 | |
| 31 | #ifdef SHOW_LOGTRACE |
| 32 | #include <linux/syscalls.h> |
| 33 | #include <event_log.h> |
| 34 | #endif /* SHOW_LOGTRACE */ |
| 35 | #include <linux/skbuff.h> |
| 36 | #include <linux/spinlock.h> |
| 37 | #include <linux/interrupt.h> |
| 38 | #ifdef CONFIG_COMPAT |
| 39 | #include <linux/compat.h> |
#endif /* CONFIG_COMPAT */
| 41 | #ifdef CONFIG_HAS_WAKELOCK |
| 42 | #include <linux/pm_wakeup.h> |
| 43 | #endif /* CONFIG_HAS_WAKELOCK */ |
| 44 | #include <dngl_stats.h> |
| 45 | #include <dhd.h> |
| 46 | #include <dhd_dbg.h> |
| 47 | #include <dhd_debug.h> |
| 48 | #include <dhd_linux.h> |
| 49 | #include <dhd_bus.h> |
| 50 | |
| 51 | #ifdef PCIE_FULL_DONGLE |
| 52 | #include <bcmmsgbuf.h> |
| 53 | #include <dhd_flowring.h> |
| 54 | #endif /* PCIE_FULL_DONGLE */ |
| 55 | |
| 56 | #ifdef DHD_QOS_ON_SOCK_FLOW |
| 57 | struct dhd_sock_qos_info; |
| 58 | #endif /* DHD_QOS_ON_SOCK_FLOW */ |
| 59 | |
/*
 * Do not include this header except from dhd_linux.c and dhd_linux_sysfs.c.
 * Local private structure (extension of pub)
 */
typedef struct dhd_info {
	dhd_pub_t pub;		/* OS-independent DHD state (see dhd.h), extended by this struct */
	/* for supporting multiple interfaces.
	 * static_ifs hold the net ifaces without valid FW IF
	 */
	dhd_if_t *iflist[DHD_MAX_IFS + DHD_MAX_STATIC_IFS];
	void *adapter;			/* adapter information, interrupt, fw path etc. */
	char fw_path[PATH_MAX];		/* path to firmware image */
	char nv_path[PATH_MAX];		/* path to nvram vars file */
#ifdef DHD_UCODE_DOWNLOAD
	char uc_path[PATH_MAX];	/* path to ucode image */
#endif /* DHD_UCODE_DOWNLOAD */

	/* serialize dhd iovars */
	struct mutex dhd_iovar_mutex;

	struct semaphore proto_sem;	/* NOTE(review): presumably serializes proto-layer transactions - confirm in dhd_linux.c */
#ifdef PROP_TXSTATUS
	spinlock_t wlfc_spinlock;	/* protects wireless flow-control (wlfc) state */

#ifdef BCMDBUS
	ulong wlfc_lock_flags;		/* saved IRQ flags for wlfc_spinlock */
	ulong wlfc_pub_lock_flags;
#endif /* BCMDBUS */
#endif /* PROP_TXSTATUS */
	wait_queue_head_t ioctl_resp_wait;	/* wait for ioctl response from dongle */
	wait_queue_head_t d3ack_wait;		/* wait for D3 ack */
	wait_queue_head_t dhd_bus_busy_state_wait;	/* wait for bus busy-state transitions */
	wait_queue_head_t dmaxfer_wait;		/* wait for dma transfer completion */
#ifdef BT_OVER_PCIE
	wait_queue_head_t quiesce_wait;		/* wait for bus quiesce to complete */
#endif /* BT_OVER_PCIE */
	uint32 default_wd_interval;	/* default watchdog timer interval */

	timer_list_compat_t timer;	/* watchdog timer */
	bool wd_timer_valid;		/* true while the watchdog timer is armed */
#ifdef DHD_PCIE_RUNTIMEPM
	timer_list_compat_t rpm_timer;	/* runtime-PM polling timer */
	bool rpm_timer_valid;
	tsk_ctl_t thr_rpm_ctl;		/* runtime-PM thread control */
#endif /* DHD_PCIE_RUNTIMEPM */
	struct tasklet_struct tasklet;	/* DPC tasklet */
	spinlock_t sdlock;
	spinlock_t txqlock;
	spinlock_t dhd_lock;
#ifdef BCMDBUS
	ulong txqlock_flags;		/* saved IRQ flags for txqlock */
#else

	struct semaphore sdsem;
	tsk_ctl_t thr_dpc_ctl;		/* DPC thread control */
	tsk_ctl_t thr_wdt_ctl;		/* watchdog thread control */
#endif /* BCMDBUS */

	tsk_ctl_t thr_rxf_ctl;		/* RX frame thread control */
	spinlock_t rxf_lock;
	bool rxthread_enabled;

	/* Wakelocks */
#if defined(CONFIG_HAS_WAKELOCK)
	struct wakeup_source *wl_wifi;		/* Wifi wakelock */
	struct wakeup_source *wl_rxwake;	/* Wifi rx wakelock */
	struct wakeup_source *wl_ctrlwake;	/* Wifi ctrl wakelock */
	struct wakeup_source *wl_wdwake;	/* Wifi wd wakelock */
	struct wakeup_source *wl_evtwake;	/* Wifi event wakelock */
	struct wakeup_source *wl_pmwake;	/* Wifi pm handler wakelock */
	struct wakeup_source *wl_txflwake;	/* Wifi tx flow wakelock */
#ifdef BCMPCIE_OOB_HOST_WAKE
	struct wakeup_source *wl_intrwake;	/* Host wakeup wakelock */
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	struct wakeup_source *wl_scanwake;	/* Wifi scan wakelock */
#endif /* DHD_USE_SCAN_WAKELOCK */
	struct wakeup_source *wl_nanwake;	/* NAN wakelock */
#endif /* CONFIG_HAS_WAKELOCK */

#if defined(OEM_ANDROID)
	/* net_device interface lock, prevent race conditions among net_dev interface
	 * calls and wifi_on or wifi_off
	 */
	struct mutex dhd_net_if_mutex;
	struct mutex dhd_suspend_mutex;
#if defined(APF)
	struct mutex dhd_apf_mutex;	/* serializes Android Packet Filter (APF) operations */
#endif /* APF */
#endif /* OEM_ANDROID */
	spinlock_t wakelock_spinlock;	/* protects the wakelock counters below */
	spinlock_t wakelock_evt_spinlock;
	uint32 wakelock_counter;
	int wakelock_wd_counter;
	int wakelock_rx_timeout_enable;
	int wakelock_ctrl_timeout_enable;
	bool waive_wakelock;
	uint32 wakelock_before_waive;

	/* Thread to issue ioctl for multicast */
	wait_queue_head_t ctrl_wait;
	atomic_t pend_8021x_cnt;	/* pending 802.1X frame count */
	dhd_attach_states_t dhd_state;	/* attach progress state */
#ifdef SHOW_LOGTRACE
	dhd_event_log_t event_data;
#endif /* SHOW_LOGTRACE */

#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
	struct early_suspend early_suspend;
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

#ifdef ARP_OFFLOAD_SUPPORT
	u32 pend_ipaddr;	/* pending IP address for ARP offload */
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef DHDTCPACK_SUPPRESS
	spinlock_t tcpack_lock;
#endif /* DHDTCPACK_SUPPRESS */
#ifdef FIX_CPU_MIN_CLOCK
	bool cpufreq_fix_status;
	struct mutex cpufreq_fix;
	struct pm_qos_request dhd_cpu_qos;
#ifdef FIX_BUS_MIN_CLOCK
	struct pm_qos_request dhd_bus_qos;
#endif /* FIX_BUS_MIN_CLOCK */
#endif /* FIX_CPU_MIN_CLOCK */
	void *dhd_deferred_wq;	/* deferred work queue handle */
#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
	ctf_t *cih;		/* ctf instance handle */
	ctf_brc_hot_t *brc_hot;	/* hot ctf bridge cache entry */
#endif /* BCM_ROUTER_DHD && HNDCTF */
#ifdef DEBUG_CPU_FREQ
	struct notifier_block freq_trans;
	int __percpu *new_freq;
#endif /* DEBUG_CPU_FREQ */
	unsigned int unit;	/* unit (instance) number of this DHD */
	struct notifier_block pm_notifier;
#ifdef DHD_PSTA
	uint32 psta_mode;	/* PSTA or PSR */
#endif /* DHD_PSTA */
#ifdef DHD_WET
	uint32 wet_mode;
#endif /* DHD_WET */
#ifdef DHD_DEBUG
	dhd_dump_t *dump;
	timer_list_compat_t join_timer;
	u32 join_timeout_val;
	bool join_timer_active;
	uint scan_time_count;
	timer_list_compat_t scan_timer;
	bool scan_timer_active;
#endif /* DHD_DEBUG */
	struct delayed_work dhd_dpc_dispatcher_work;

	/* CPU on which the DHD DPC is running */
	atomic_t dpc_cpu;
	atomic_t prev_dpc_cpu;
#if defined(DHD_LB)
#if defined(DHD_LB_HOST_CTRL)
	bool permitted_primary_cpu;
#endif /* DHD_LB_HOST_CTRL */
	/* CPU Load Balance dynamic CPU selection */

	/* Variable that tracks the current CPUs available for candidacy */
	cpumask_var_t cpumask_curr_avail;

	/* Primary and secondary CPU mask */
	cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */
	cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */

	struct notifier_block cpu_notifier;

	/* Napi struct for handling rx packet sendup. Packets are removed from
	 * H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then
	 * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled
	 * to run to rx_napi_cpu.
	 */
	struct sk_buff_head rx_pend_queue ____cacheline_aligned;
	struct sk_buff_head rx_napi_queue ____cacheline_aligned;
	struct sk_buff_head rx_process_queue ____cacheline_aligned;
	struct napi_struct rx_napi_struct ____cacheline_aligned;
	atomic_t rx_napi_cpu;	/* cpu on which the napi is dispatched */
	struct net_device *rx_napi_netdev;	/* netdev of primary interface */

	struct work_struct rx_napi_dispatcher_work;
	struct work_struct tx_compl_dispatcher_work;
	struct work_struct tx_dispatcher_work;
	struct work_struct rx_compl_dispatcher_work;

	/* Number of times DPC Tasklet ran */
	uint32 dhd_dpc_cnt;
	/* Number of times NAPI processing got scheduled */
	uint32 napi_sched_cnt;
	/* NAPI latency stats */
	uint64 *napi_latency;
	uint64 napi_schedule_time;
	/* Number of times NAPI processing ran on each available core */
	uint32 *napi_percpu_run_cnt;
	/* Number of times RX Completions got scheduled */
	uint32 rxc_sched_cnt;
	/* Number of times RX Completion ran on each available core */
	uint32 *rxc_percpu_run_cnt;
	/* Number of times TX Completions got scheduled */
	uint32 txc_sched_cnt;
	/* Number of times TX Completions ran on each available core */
	uint32 *txc_percpu_run_cnt;
	/* CPU status */
	/* Number of times each CPU came online */
	uint32 *cpu_online_cnt;
	/* Number of times each CPU went offline */
	uint32 *cpu_offline_cnt;

	/* Number of times TX processing run on each core */
	uint32 *txp_percpu_run_cnt;
	/* Number of times TX start run on each core */
	uint32 *tx_start_percpu_run_cnt;

	/* Tx load balancing */

	/* TODO: Need to see if batch processing is really required in case of TX
	 * processing. In case of RX the Dongle can send a bunch of rx completions,
	 * hence we took a 3 queue approach
	 * enque - adds the skbs to rx_pend_queue
	 * dispatch - uses a lock and adds the list of skbs from pend queue to
	 * napi queue
	 * napi processing - copies the pend_queue into a local queue and works
	 * on it.
	 * But for TX its going to be 1 skb at a time, so we are just thinking
	 * of using only one queue and use the lock supported skb queue functions
	 * to add and process it. If its inefficient we'll re-visit the queue
	 * design.
	 */

	/* When the NET_TX tries to send a TX packet put it into tx_pend_queue */
	/* struct sk_buff_head tx_pend_queue ____cacheline_aligned; */
	/*
	 * From the Tasklet that actually sends out data
	 * copy the list tx_pend_queue into tx_active_queue. There by we need
	 * to spinlock to only perform the copy the rest of the code ie to
	 * construct the tx_pend_queue and the code to process tx_active_queue
	 * can be lockless. The concept is borrowed as is from RX processing
	 */
	/* struct sk_buff_head tx_active_queue ____cacheline_aligned; */

	/* Control TXP in runtime, enable by default */
	atomic_t lb_txp_active;

	/* Control RXP in runtime, enable by default */
	atomic_t lb_rxp_active;

	/*
	 * When the NET_TX tries to send a TX packet put it into tx_pend_queue
	 * For now, the processing tasklet will also directly operate on this
	 * queue
	 */
	struct sk_buff_head tx_pend_queue ____cacheline_aligned;

	/* cpu on which the DHD Tx is happening */
	atomic_t tx_cpu;

	/* CPU on which the Network stack is calling the DHD's xmit function */
	atomic_t net_tx_cpu;

	/* Tasklet context from which the DHD's TX processing happens */
	struct tasklet_struct tx_tasklet;

	/*
	 * Consumer Histogram - NAPI RX Packet processing
	 * -----------------------------------------------
	 * On Each CPU, when the NAPI RX Packet processing call back was invoked
	 * how many packets were processed is captured in this data structure.
	 * Now its difficult to capture the "exact" number of packets processed.
	 * So considering the packet counter to be a 32 bit one, we have a
	 * bucket with 8 bins (2^1, 2^2 ... 2^8). The "number" of packets
	 * processed is rounded off to the next power of 2 and put in the
	 * appropriate "bin" the value in the bin gets incremented.
	 * For example, assume that in CPU 1 if NAPI Rx runs 3 times
	 * and the packet count processed is as follows (assume the bin counters are 0)
	 * iteration 1 - 10 (the bin counter 2^4 increments to 1)
	 * iteration 2 - 30 (the bin counter 2^5 increments to 1)
	 * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2)
	 */
	uint32 *napi_rx_hist[HIST_BIN_SIZE];
	uint32 *txc_hist[HIST_BIN_SIZE];
	uint32 *rxc_hist[HIST_BIN_SIZE];
	struct kobject dhd_lb_kobj;	/* sysfs kobject for load-balance controls */
	bool dhd_lb_candidacy_override;
#endif /* DHD_LB */
#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
	struct work_struct axi_error_dispatcher_work;
#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
#ifdef SHOW_LOGTRACE
#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
	tsk_ctl_t thr_logtrace_ctl;	/* logtrace kthread control */
#else
	struct delayed_work event_log_dispatcher_work;
#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
#endif /* SHOW_LOGTRACE */

#ifdef BTLOG
	struct work_struct bt_log_dispatcher_work;
#endif /* BTLOG */
#ifdef EWP_EDL
	struct delayed_work edl_dispatcher_work;
#endif /* EWP_EDL */
#if defined(WLAN_ACCEL_BOOT)
	int fs_check_retry;
	struct delayed_work wl_accel_work;
	bool wl_accel_force_reg_on;
	bool wl_accel_boot_on_done;
#endif /* WLAN_ACCEL_BOOT */
#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
#if defined(BCMDBUS)
	struct task_struct *fw_download_task;
	struct semaphore fw_download_lock;
#endif /* BCMDBUS */
#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
	struct kobject dhd_kobj;	/* sysfs kobject for this DHD instance */
	timer_list_compat_t timesync_timer;
#if defined(BT_OVER_SDIO)
	char btfw_path[PATH_MAX];	/* path to BT firmware image */
#endif /* defined (BT_OVER_SDIO) */
#ifdef WL_MONITOR
	struct net_device *monitor_dev;	/* monitor pseudo device */
	struct sk_buff *monitor_skb;
	uint monitor_len;
	uint monitor_type;	/* monitor pseudo device */
#ifdef HOST_RADIOTAP_CONV
	monitor_info_t *monitor_info;
	uint host_radiotap_conv;
#endif /* HOST_RADIOTAP_CONV */
#endif /* WL_MONITOR */
#if defined (BT_OVER_SDIO)
	struct mutex bus_user_lock;	/* lock for sdio bus apis shared between WLAN & BT */
	int bus_user_count;	/* User counts of sdio bus shared between WLAN & BT */
#endif /* BT_OVER_SDIO */
#ifdef SHOW_LOGTRACE
	struct sk_buff_head evt_trace_queue ____cacheline_aligned;
#endif /* SHOW_LOGTRACE */
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	struct workqueue_struct *tx_wq;
	struct workqueue_struct *rx_wq;
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
#ifdef BTLOG
	struct sk_buff_head bt_log_queue ____cacheline_aligned;
#endif /* BTLOG */
#ifdef PCIE_INB_DW
	wait_queue_head_t ds_exit_wait;
#endif /* PCIE_INB_DW */
#ifdef DHD_DEBUG_UART
	bool duart_execute;
#endif /* DHD_DEBUG_UART */
#ifdef BT_OVER_PCIE
	struct mutex quiesce_flr_lock;
	struct mutex quiesce_lock;
	enum dhd_bus_quiesce_state dhd_quiesce_state;
#endif /* BT_OVER_PCIE */
	struct mutex logdump_lock;
#if defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL)
	/* Root directory for GDB Proxy's (proc)fs files, used by first (default) interface */
	struct proc_dir_entry *gdb_proxy_fs_root;
	/* Name of procfs root directory */
	char gdb_proxy_fs_root_name[100];
#endif /* defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL) */
#if defined(DHD_MQ) && defined(DHD_MQ_STATS)
	uint64 pktcnt_qac_histo[MQ_MAX_QUEUES][AC_COUNT];
	uint64 pktcnt_per_ac[AC_COUNT];
	uint64 cpu_qstats[MQ_MAX_QUEUES][MQ_MAX_CPUS];
#endif /* DHD_MQ && DHD_MQ_STATS */
	/* indicates mem_dump was scheduled as work queue or called directly */
	bool scheduled_memdump;
#ifdef DHD_PKTTS
	bool latency;	/* pktts enab flag */
	pktts_flow_t config[PKTTS_CONFIG_MAX];	/* pktts user config */
#endif /* DHD_PKTTS */
	struct work_struct dhd_hang_process_work;
#ifdef DHD_HP2P
	spinlock_t hp2p_lock;
#endif /* DHD_HP2P */
#ifdef DHD_QOS_ON_SOCK_FLOW
	struct dhd_sock_qos_info *psk_qos;
#endif /* DHD_QOS_ON_SOCK_FLOW */
#ifdef WL_CFGVENDOR_SEND_ALERT_EVENT
	struct work_struct dhd_alert_process_work;
#endif /* WL_CFGVENDOR_SEND_ALERT_EVENT */
} dhd_info_t;
| 445 | |
#ifdef WL_MONITOR
#define MONPKT_EXTRA_LEN	48u	/* extra packet length budget for monitor mode */
#endif /* WL_MONITOR */

/* sysfs / procfs setup and teardown for a DHD instance */
extern int dhd_sysfs_init(dhd_info_t *dhd);
extern void dhd_sysfs_exit(dhd_info_t *dhd);
extern void dhd_dbg_ring_proc_create(dhd_pub_t *dhdp);
extern void dhd_dbg_ring_proc_destroy(dhd_pub_t *dhdp);

/* Low-level transmit entry point used within the linux layer */
int __dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf);

void dhd_dpc_tasklet_dispatcher_work(struct work_struct * work);
#if defined(DHD_LB)
#if defined(DHD_LB_TXP)
/* TX-path load balancing */
int dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net, int ifidx, void *skb);
void dhd_tx_dispatcher_work(struct work_struct * work);
void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp);
void dhd_lb_tx_dispatch(dhd_pub_t *dhdp);
void dhd_lb_tx_handler(unsigned long data);
#endif /* DHD_LB_TXP */

#if defined(DHD_LB_RXP)
/* RX-path load balancing (NAPI) */
int dhd_napi_poll(struct napi_struct *napi, int budget);
void dhd_rx_napi_dispatcher_work(struct work_struct * work);
void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
unsigned long dhd_read_lb_rxp(dhd_pub_t *dhdp);
#endif /* DHD_LB_RXP */

/* CPU mask / candidacy management for load balancing */
void dhd_lb_set_default_cpus(dhd_info_t *dhd);
void dhd_cpumasks_deinit(dhd_info_t *dhd);
int dhd_cpumasks_init(dhd_info_t *dhd);

void dhd_select_cpu_candidacy(dhd_info_t *dhd);

/* CPU hotplug callbacks; the kernel API changed in v4.10 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
int dhd_cpu_startup_callback(unsigned int cpu);
int dhd_cpu_teardown_callback(unsigned int cpu);
#else
int dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu);
#endif /* LINUX_VERSION_CODE < 4.10.0 */

int dhd_register_cpuhp_callback(dhd_info_t *dhd);
int dhd_unregister_cpuhp_callback(dhd_info_t *dhd);
#endif /* DHD_LB */

#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
void dhd_irq_set_affinity(dhd_pub_t *dhdp, const struct cpumask *cpumask);
#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
#ifdef DHD_SSSR_DUMP
extern uint sssr_enab;	/* module-param style enables for SSSR / FIS dump */
extern uint fis_enab;
#endif /* DHD_SSSR_DUMP */
| 499 | |
Stephen Chu | 3a53a73 | 2021-12-30 20:21:08 +0800 | [diff] [blame] | 500 | /* |
| 501 | * Some android arch platforms backported wakelock APIs from kernel 5.4.0 |
| 502 | * Since their minor versions are changed in the Android R OS |
| 503 | * Added defines for these platforms |
| 504 | * 4.19.81 -> 4.19.110, 4.14.78 -> 4.14.170 |
| 505 | */ |
| 506 | #if ((defined(BOARD_HIKEY) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 96))) || \ |
| 507 | (defined(CONFIG_ARCH_MSM) && (((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 170)) && \ |
| 508 | (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0))) || (LINUX_VERSION_CODE >= \ |
| 509 | KERNEL_VERSION(4, 19, 110)))) || defined(CONFIG_SOC_EXYNOS3830)) |
| 510 | #define WAKELOCK_BACKPORT |
| 511 | #endif /* WAKELOCK_BACKPORT */ |
| 512 | |
#ifdef CONFIG_HAS_WAKELOCK
/*
 * Map the DHD wakelock API onto the kernel wakeup-source API.
 * wakeup_source_register() gained a struct device argument in kernel 5.4
 * (also present on platforms that backported it, see WAKELOCK_BACKPORT).
 *
 * Note: the do { } while (0) wrappers deliberately carry NO trailing
 * semicolon, so each macro expands to a single statement and is safe in
 * un-braced if/else bodies; callers supply the semicolon. (The previous
 * definitions ended in "while (0);", which defeats the idiom and breaks
 * "if (x) dhd_wake_lock_init(...); else ..." at compile time.)
 */
#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) || defined(WAKELOCK_BACKPORT))
#define dhd_wake_lock_init(wakeup_source, dev, name) \
	do { \
		wakeup_source = wakeup_source_register(dev, name); \
	} while (0)
#else
#define dhd_wake_lock_init(wakeup_source, dev, name) \
	do { \
		wakeup_source = wakeup_source_register(name); \
	} while (0)
#endif /* LINUX_VERSION >= 5.4.0 || WAKELOCK_BACKPORT */
#define dhd_wake_lock_destroy(wakeup_source) \
	do { \
		wakeup_source_unregister(wakeup_source); \
	} while (0)
#define dhd_wake_lock(wakeup_source)			__pm_stay_awake(wakeup_source)
#define dhd_wake_unlock(wakeup_source)			__pm_relax(wakeup_source)
#define dhd_wake_lock_active(wakeup_source)		((wakeup_source)->active)
#define dhd_wake_lock_timeout(wakeup_source, timeout) \
	__pm_wakeup_event(wakeup_source, jiffies_to_msecs(timeout))
#endif /* CONFIG_HAS_WAKELOCK */
| 535 | |
| 536 | #endif /* __DHD_LINUX_PRIV_H__ */ |