/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _CORE_H_
#define _CORE_H_

#include <linux/completion.h>
#include <linux/if_ether.h>
#include <linux/types.h>
#include <linux/pci.h>

#include "htt.h"
#include "htc.h"
#include "hw.h"
#include "targaddrs.h"
#include "wmi.h"
#include "../ath.h"
#include "../regd.h"
#include "../dfs_pattern_detector.h"

#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
#define WO(_f) ((_f##_OFFSET) >> 2)
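
/* Illustrative example only (not used by the driver): MS() and SM() expect
 * a *_MASK/*_LSB pair describing a register field. MS() extracts the field
 * from a register word, SM() shifts a value back into field position. The
 * ATH10K_EXAMPLE_FIELD names below are made up purely for this sketch. */
#define ATH10K_EXAMPLE_FIELD_MASK 0x00000ff0
#define ATH10K_EXAMPLE_FIELD_LSB  4

static inline u32 ath10k_example_field_get(u32 reg)
{
	/* (reg & ATH10K_EXAMPLE_FIELD_MASK) >> ATH10K_EXAMPLE_FIELD_LSB */
	return MS(reg, ATH10K_EXAMPLE_FIELD);
}

static inline u32 ath10k_example_field_set(u32 val)
{
	/* (val << ATH10K_EXAMPLE_FIELD_LSB) & ATH10K_EXAMPLE_FIELD_MASK */
	return SM(val, ATH10K_EXAMPLE_FIELD);
}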

#define ATH10K_SCAN_ID 0
#define WMI_READY_TIMEOUT (5 * HZ)
#define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ)
#define ATH10K_NUM_CHANS 38

/* Antenna noise floor */
#define ATH10K_DEFAULT_NOISE_FLOOR -95

#define ATH10K_MAX_NUM_MGMT_PENDING 128

struct ath10k;

struct ath10k_skb_cb {
	dma_addr_t paddr;
	bool is_mapped;
	bool is_aborted;
	u8 vdev_id;

	struct {
		u8 tid;
		bool is_offchan;

		u8 frag_len;
		u8 pad_len;
	} __packed htt;
} __packed;

static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct ath10k_skb_cb) >
		     IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
	return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
}

static inline int ath10k_skb_map(struct device *dev, struct sk_buff *skb)
{
	if (ATH10K_SKB_CB(skb)->is_mapped)
		return -EINVAL;

	ATH10K_SKB_CB(skb)->paddr = dma_map_single(dev, skb->data, skb->len,
						   DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(dev, ATH10K_SKB_CB(skb)->paddr)))
		return -EIO;

	ATH10K_SKB_CB(skb)->is_mapped = true;
	return 0;
}

static inline int ath10k_skb_unmap(struct device *dev, struct sk_buff *skb)
{
	if (!ATH10K_SKB_CB(skb)->is_mapped)
		return -EINVAL;

	dma_unmap_single(dev, ATH10K_SKB_CB(skb)->paddr, skb->len,
			 DMA_TO_DEVICE);
	ATH10K_SKB_CB(skb)->is_mapped = false;
	return 0;
}
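
/* Illustrative only: the expected calling pattern for the DMA helpers
 * above. ath10k_example_skb_dma() is hypothetical and not part of the
 * driver; real tx paths map the skb, hand ATH10K_SKB_CB(skb)->paddr to
 * the device and unmap it again on tx completion. */
static inline int ath10k_example_skb_dma(struct device *dev,
					 struct sk_buff *skb)
{
	int ret;

	ret = ath10k_skb_map(dev, skb);
	if (ret)
		return ret;

	/* ATH10K_SKB_CB(skb)->paddr is now valid for the device */

	return ath10k_skb_unmap(dev, skb);
}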

static inline u32 host_interest_item_address(u32 item_offset)
{
	return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
}
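
/* Illustrative sketch only, assuming the HI_ITEM() helper and the
 * hi_board_data member from targaddrs.h: boot/BMI code converts a
 * struct host_interest offset into a target address with the helper
 * above. The wrapper below is hypothetical, not a driver API. */
static inline u32 ath10k_example_hi_board_data_addr(void)
{
	return host_interest_item_address(HI_ITEM(hi_board_data));
}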

struct ath10k_bmi {
	bool done_sent;
};

#define ATH10K_MAX_MEM_REQS 16

struct ath10k_mem_chunk {
	void *vaddr;
	dma_addr_t paddr;
	u32 len;
	u32 req_id;
};

struct ath10k_wmi {
	enum ath10k_htc_ep_id eid;
	struct completion service_ready;
	struct completion unified_ready;
	wait_queue_head_t tx_credits_wq;
	struct wmi_cmd_map *cmd;
	struct wmi_vdev_param_map *vdev_param;
	struct wmi_pdev_param_map *pdev_param;

	u32 num_mem_chunks;
	struct ath10k_mem_chunk mem_chunks[ATH10K_MAX_MEM_REQS];
};

struct ath10k_peer_stat {
	u8 peer_macaddr[ETH_ALEN];
	u32 peer_rssi;
	u32 peer_tx_rate;
};

struct ath10k_target_stats {
	/* PDEV stats */
	s32 ch_noise_floor;
	u32 tx_frame_count;
	u32 rx_frame_count;
	u32 rx_clear_count;
	u32 cycle_count;
	u32 phy_err_count;
	u32 chan_tx_power;

	/* PDEV TX stats */
	s32 comp_queued;
	s32 comp_delivered;
	s32 msdu_enqued;
	s32 mpdu_enqued;
	s32 wmm_drop;
	s32 local_enqued;
	s32 local_freed;
	s32 hw_queued;
	s32 hw_reaped;
	s32 underrun;
	s32 tx_abort;
	s32 mpdus_requed;
	u32 tx_ko;
	u32 data_rc;
	u32 self_triggers;
	u32 sw_retry_failure;
	u32 illgl_rate_phy_err;
	u32 pdev_cont_xretry;
	u32 pdev_tx_timeout;
	u32 pdev_resets;
	u32 phy_underrun;
	u32 txop_ovf;

	/* PDEV RX stats */
	s32 mid_ppdu_route_change;
	s32 status_rcvd;
	s32 r0_frags;
	s32 r1_frags;
	s32 r2_frags;
	s32 r3_frags;
	s32 htt_msdus;
	s32 htt_mpdus;
	s32 loc_msdus;
	s32 loc_mpdus;
	s32 oversize_amsdu;
	s32 phy_errs;
	s32 phy_err_drop;
	s32 mpdu_errs;

	/* VDEV STATS */

	/* PEER STATS */
	u8 peers;
	struct ath10k_peer_stat peer_stat[TARGET_NUM_PEERS];

	/* TODO: Beacon filter stats */

};

struct ath10k_dfs_stats {
	u32 phy_errors;
	u32 pulses_total;
	u32 pulses_detected;
	u32 pulses_discarded;
	u32 radar_detected;
};

#define ATH10K_MAX_NUM_PEER_IDS (1 << 11) /* htt rx_desc limit */

struct ath10k_peer {
	struct list_head list;
	int vdev_id;
	u8 addr[ETH_ALEN];
	DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS);
	struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
};

#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)

struct ath10k_vif {
	struct list_head list;

	u32 vdev_id;
	enum wmi_vdev_type vdev_type;
	enum wmi_vdev_subtype vdev_subtype;
	u32 beacon_interval;
	u32 dtim_period;
	struct sk_buff *beacon;

	struct ath10k *ar;
	struct ieee80211_vif *vif;

	struct work_struct wep_key_work;
	struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
	u8 def_wep_key_idx;
	u8 def_wep_key_newidx;

	u16 tx_seq_no;

	union {
		struct {
			u8 bssid[ETH_ALEN];
			u32 uapsd;
		} sta;
		struct {
			/* 127 stations; wmi limit */
			u8 tim_bitmap[16];
			u8 tim_len;
			u32 ssid_len;
			u8 ssid[IEEE80211_MAX_SSID_LEN];
			bool hidden_ssid;
			/* P2P_IE with NoA attribute for P2P_GO case */
			u32 noa_len;
			u8 *noa_data;
		} ap;
		struct {
			u8 bssid[ETH_ALEN];
		} ibss;
	} u;
};

struct ath10k_vif_iter {
	u32 vdev_id;
	struct ath10k_vif *arvif;
};

struct ath10k_debug {
	struct dentry *debugfs_phy;

	struct ath10k_target_stats target_stats;
	u32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];

	struct completion event_stats_compl;

	unsigned long htt_stats_mask;
	struct delayed_work htt_stats_dwork;
	struct ath10k_dfs_stats dfs_stats;
	struct ath_dfs_pool_stats dfs_pool_stats;
};

enum ath10k_state {
	ATH10K_STATE_OFF = 0,
	ATH10K_STATE_ON,

	/* When doing firmware recovery the device is first powered down.
	 * mac80211 is supposed to call the start() hook later on. It is
	 * however possible that driver unloading and a firmware crash
	 * overlap. mac80211 can wait on conf_mutex in stop() while the
	 * device is stopped in ath10k_core_restart() work holding
	 * conf_mutex. The RESTARTED state means that the device is up and
	 * mac80211 has started hw reconfiguration. Once mac80211 is done
	 * with the reconfiguration we set the state to STATE_ON in
	 * restart_complete(). */
	ATH10K_STATE_RESTARTING,
	ATH10K_STATE_RESTARTED,

	/* The device has crashed while restarting hw. This state is like
	 * ON but commands are blocked in HTC and a -ECOMM response is
	 * given. This prevents completion timeouts and makes the driver
	 * more responsive to userspace commands. It also prevents
	 * recursive recovery. */
	ATH10K_STATE_WEDGED,
};
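
/* Illustrative summary of the transitions described in the comments above
 * (not a definitive state chart; the authoritative logic lives in the
 * driver code):
 *
 *	OFF -> ON				normal start()
 *	ON -> RESTARTING			firmware crash, recovery begins
 *	RESTARTING -> RESTARTED			mac80211 re-enters start()
 *	RESTARTED -> ON				restart_complete()
 *	RESTARTING/RESTARTED -> WEDGED		crash during recovery
 */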

enum ath10k_fw_features {
	/* wmi_mgmt_rx_hdr contains extra RSSI information */
	ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX = 0,

	/* firmware from 10X branch */
	ATH10K_FW_FEATURE_WMI_10X = 1,

	/* firmware supports management frame tx over WMI, otherwise HTT
	 * is used */
	ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX = 2,

	/* firmware does not support P2P */
	ATH10K_FW_FEATURE_NO_P2P = 3,

	/* keep last */
	ATH10K_FW_FEATURE_COUNT,
};
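
/* Firmware features are recorded in the fw_features bitmap of struct
 * ath10k (declared below) and are normally queried with the regular
 * bitmap helpers, e.g.:
 *
 *	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
 *		...use the 10.x WMI command set...
 */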

enum ath10k_dev_flags {
	/* Indicates that the ath10k device is in the CAC phase of DFS */
	ATH10K_CAC_RUNNING,
	ATH10K_FLAG_FIRST_BOOT_DONE,
};
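
/* Device flags live in the dev_flags word of struct ath10k (declared
 * below) and are manipulated with the atomic bitop helpers, e.g.:
 *
 *	set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
 *	...
 *	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
 *		...CAC is still in progress...
 */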

struct ath10k {
	struct ath_common ath_common;
	struct ieee80211_hw *hw;
	struct device *dev;
	u8 mac_addr[ETH_ALEN];

	u32 chip_id;
	u32 target_version;
	u8 fw_version_major;
	u32 fw_version_minor;
	u16 fw_version_release;
	u16 fw_version_build;
	u32 phy_capability;
	u32 hw_min_tx_power;
	u32 hw_max_tx_power;
	u32 ht_cap_info;
	u32 vht_cap_info;
	u32 num_rf_chains;

	DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);

	struct targetdef *targetdef;
	struct hostdef *hostdef;

	bool p2p;

	struct {
		void *priv;
		const struct ath10k_hif_ops *ops;
	} hif;

	wait_queue_head_t event_queue;
	bool is_target_paused;

	struct ath10k_bmi bmi;
	struct ath10k_wmi wmi;
	struct ath10k_htc htc;
	struct ath10k_htt htt;

	struct ath10k_hw_params {
		u32 id;
		const char *name;
		u32 patch_load_addr;

		struct ath10k_hw_params_fw {
			const char *dir;
			const char *fw;
			const char *otp;
			const char *board;
		} fw;
	} hw_params;

	const struct firmware *board;
	const void *board_data;
	size_t board_len;

	const struct firmware *otp;
	const void *otp_data;
	size_t otp_len;

	const struct firmware *firmware;
	const void *firmware_data;
	size_t firmware_len;

	int fw_api;

	struct {
		struct completion started;
		struct completion completed;
		struct completion on_channel;
		struct timer_list timeout;
		bool is_roc;
		bool in_progress;
		bool aborting;
		int vdev_id;
		int roc_freq;
	} scan;

	struct {
		struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
	} mac;

	/* should never be NULL; needed for regular htt rx */
	struct ieee80211_channel *rx_channel;

	/* valid during scan; needed for mgmt rx during scan */
	struct ieee80211_channel *scan_channel;

	int free_vdev_map;
	int monitor_vdev_id;
	bool monitor_enabled;
	bool monitor_present;
	unsigned int filter_flags;
	unsigned long dev_flags;
	u32 dfs_block_radar_events;

	struct wmi_pdev_set_wmm_params_arg wmm_params;
	struct completion install_key_done;

	struct completion vdev_setup_done;

	struct workqueue_struct *workqueue;

	/* prevents concurrent FW reconfiguration */
	struct mutex conf_mutex;

	/* protects shared structure data */
	spinlock_t data_lock;

	struct list_head arvifs;
	struct list_head peers;
	wait_queue_head_t peer_mapping_wq;

	/* number of created peers; protected by data_lock */
	int num_peers;

	struct work_struct offchan_tx_work;
	struct sk_buff_head offchan_tx_queue;
	struct completion offchan_tx_completed;
	struct sk_buff *offchan_tx_skb;

	struct work_struct wmi_mgmt_tx_work;
	struct sk_buff_head wmi_mgmt_tx_queue;

	enum ath10k_state state;

	struct work_struct restart_work;

	/* cycle count is reported twice for each visited channel during
	 * scan; access protected by data_lock */
	u32 survey_last_rx_clear_count;
	u32 survey_last_cycle_count;
	struct survey_info survey[ATH10K_NUM_CHANS];

	struct dfs_pattern_detector *dfs_detector;

#ifdef CONFIG_ATH10K_DEBUGFS
	struct ath10k_debug debug;
#endif
};

struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
				  const struct ath10k_hif_ops *hif_ops);
void ath10k_core_destroy(struct ath10k *ar);

int ath10k_core_start(struct ath10k *ar);
void ath10k_core_stop(struct ath10k *ar);
int ath10k_core_register(struct ath10k *ar, u32 chip_id);
void ath10k_core_unregister(struct ath10k *ar);
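
/* Typical (illustrative) bring-up order as seen from the bus glue; the
 * exact sequence is a property of the bus code, not of this header:
 *
 *	ar = ath10k_core_create(hif_priv, dev, hif_ops);
 *	...bus/HIF specific setup, read chip_id...
 *	ath10k_core_register(ar, chip_id);
 *
 * and in reverse on removal:
 *
 *	ath10k_core_unregister(ar);
 *	ath10k_core_destroy(ar);
 */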

#endif /* _CORE_H_ */