/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/skbuff.h>

#include "core.h"
#include "htc.h"
#include "debug.h"
#include "wmi.h"
#include "mac.h"

/* MAIN WMI cmd track */
static struct wmi_cmd_map wmi_cmd_map = {
	.init_cmdid = WMI_INIT_CMDID,
	.start_scan_cmdid = WMI_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID,
	.pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID,
	.bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_MGMT_TX_CMDID,
	.prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID,
	.addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_ROAM_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
	.ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
	.peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
				WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
				WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
				WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
				WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
				WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid =
				WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
	.vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID,
	.network_list_offload_config_cmdid =
				WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
	.gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID,
	.csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID,
	.csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
	.chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID,
	.peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID,
	.peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID,
	.sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID,
	.sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID,
	.sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD,
	.echo_cmdid = WMI_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID,
	.vdev_set_keepalive_cmdid = WMI_VDEV_SET_KEEPALIVE_CMDID,
	.vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID,
	.force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
	.gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
};

/* TODO: 10.X WMI cmd track */

int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
	int ret;
	ret = wait_for_completion_timeout(&ar->wmi.service_ready,
					  WMI_SERVICE_READY_TIMEOUT_HZ);
	return ret;
}

int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
{
	int ret;
	ret = wait_for_completion_timeout(&ar->wmi.unified_ready,
					  WMI_UNIFIED_READY_TIMEOUT_HZ);
	return ret;
}

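/* Allocate an skb for a WMI command payload: the requested length is rounded
 * up to a 4-byte boundary and headroom is reserved for the HTC/WMI headers
 * that get pushed in front of the payload before transmission.
 */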
static struct sk_buff *ath10k_wmi_alloc_skb(u32 len)
{
	struct sk_buff *skb;
	u32 round_len = roundup(len, 4);

	skb = ath10k_htc_alloc_skb(WMI_SKB_HEADROOM + round_len);
	if (!skb)
		return NULL;

	skb_reserve(skb, WMI_SKB_HEADROOM);
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn("Unaligned WMI skb\n");

	skb_put(skb, round_len);
	memset(skb->data, 0, round_len);

	return skb;
}

static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb(skb);
}

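/* Prepend the WMI command header and hand the skb to HTC without blocking.
 * On failure the header is pulled back off so the caller may retry with the
 * same skb; ath10k_wmi_cmd_send() loops on -EAGAIN while HTC tx credits are
 * exhausted.
 */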
static int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
				      u32 cmd_id)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
	struct wmi_cmd_hdr *cmd_hdr;
	int ret;
	u32 cmd = 0;

	if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		return -ENOMEM;

	cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID);

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	cmd_hdr->cmd_id = __cpu_to_le32(cmd);

	memset(skb_cb, 0, sizeof(*skb_cb));
	ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
	trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len, ret);

	if (ret)
		goto err_pull;

	return 0;

err_pull:
	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
	return ret;
}

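/* Push the beacon buffered on the vif (if any) to the firmware without
 * waiting for tx credits; on success the host copy is freed. Caller must
 * hold ar->data_lock.
 */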
static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
{
	struct wmi_bcn_tx_arg arg = {0};
	int ret;

	lockdep_assert_held(&arvif->ar->data_lock);

	if (arvif->beacon == NULL)
		return;

	arg.vdev_id = arvif->vdev_id;
	arg.tx_rate = 0;
	arg.tx_power = 0;
	arg.bcn = arvif->beacon->data;
	arg.bcn_len = arvif->beacon->len;

	ret = ath10k_wmi_beacon_send_nowait(arvif->ar, &arg);
	if (ret)
		return;

	dev_kfree_skb_any(arvif->beacon);
	arvif->beacon = NULL;
}

static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);

	ath10k_wmi_tx_beacon_nowait(arvif);
}

static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
{
	spin_lock_bh(&ar->data_lock);
	ieee80211_iterate_active_interfaces_atomic(ar->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   ath10k_wmi_tx_beacons_iter,
						   NULL);
	spin_unlock_bh(&ar->data_lock);
}

static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
{
	/* try to send pending beacons first. they take priority */
	ath10k_wmi_tx_beacons_nowait(ar);

	wake_up(&ar->wmi.tx_credits_wq);
}

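/* Blocking WMI send: retry ath10k_wmi_cmd_send_nowait() every time HTC tx
 * credits are replenished (see ath10k_wmi_op_ep_tx_credits), giving up after
 * three seconds. The skb is freed if the command could not be submitted.
 */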
static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
			       u32 cmd_id)
{
	int ret = -EINVAL;

	wait_event_timeout(ar->wmi.tx_credits_wq, ({
		/* try to send pending beacons first. they take priority */
		ath10k_wmi_tx_beacons_nowait(ar);

		ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
		(ret != -EAGAIN);
	}), 3*HZ);

	if (ret)
		dev_kfree_skb_any(skb);

	return ret;
}

static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_scan_event *event = (struct wmi_scan_event *)skb->data;
	enum wmi_scan_event_type event_type;
	enum wmi_scan_completion_reason reason;
	u32 freq;
	u32 req_id;
	u32 scan_id;
	u32 vdev_id;

	event_type = __le32_to_cpu(event->event_type);
	reason = __le32_to_cpu(event->reason);
	freq = __le32_to_cpu(event->channel_freq);
	req_id = __le32_to_cpu(event->scan_req_id);
	scan_id = __le32_to_cpu(event->scan_id);
	vdev_id = __le32_to_cpu(event->vdev_id);

	ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENTID\n");
	ath10k_dbg(ATH10K_DBG_WMI,
		   "scan event type %d reason %d freq %d req_id %d "
		   "scan_id %d vdev_id %d\n",
		   event_type, reason, freq, req_id, scan_id, vdev_id);

	spin_lock_bh(&ar->data_lock);

	switch (event_type) {
	case WMI_SCAN_EVENT_STARTED:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_STARTED\n");
		if (ar->scan.in_progress && ar->scan.is_roc)
			ieee80211_ready_on_channel(ar->hw);

		complete(&ar->scan.started);
		break;
	case WMI_SCAN_EVENT_COMPLETED:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_COMPLETED\n");
		switch (reason) {
		case WMI_SCAN_REASON_COMPLETED:
			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_COMPLETED\n");
			break;
		case WMI_SCAN_REASON_CANCELLED:
			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_CANCELED\n");
			break;
		case WMI_SCAN_REASON_PREEMPTED:
			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_PREEMPTED\n");
			break;
		case WMI_SCAN_REASON_TIMEDOUT:
			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_TIMEDOUT\n");
			break;
		default:
			break;
		}

		ar->scan_channel = NULL;
		if (!ar->scan.in_progress) {
			ath10k_warn("no scan requested, ignoring\n");
			break;
		}

		if (ar->scan.is_roc) {
			ath10k_offchan_tx_purge(ar);

			if (!ar->scan.aborting)
				ieee80211_remain_on_channel_expired(ar->hw);
		} else {
			ieee80211_scan_completed(ar->hw, ar->scan.aborting);
		}

		del_timer(&ar->scan.timeout);
		complete_all(&ar->scan.completed);
		ar->scan.in_progress = false;
		break;
	case WMI_SCAN_EVENT_BSS_CHANNEL:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_BSS_CHANNEL\n");
		ar->scan_channel = NULL;
		break;
	case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_FOREIGN_CHANNEL\n");
		ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
		if (ar->scan.in_progress && ar->scan.is_roc &&
		    ar->scan.roc_freq == freq) {
			complete(&ar->scan.on_channel);
		}
		break;
	case WMI_SCAN_EVENT_DEQUEUED:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_DEQUEUED\n");
		break;
	case WMI_SCAN_EVENT_PREEMPTED:
		ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_PREEMPTED\n");
		break;
	case WMI_SCAN_EVENT_START_FAILED:
		ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_START_FAILED\n");
		break;
	default:
		break;
	}

	spin_unlock_bh(&ar->data_lock);
	return 0;
}

static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
{
	enum ieee80211_band band;

	switch (phy_mode) {
	case MODE_11A:
	case MODE_11NA_HT20:
	case MODE_11NA_HT40:
	case MODE_11AC_VHT20:
	case MODE_11AC_VHT40:
	case MODE_11AC_VHT80:
		band = IEEE80211_BAND_5GHZ;
		break;
	case MODE_11G:
	case MODE_11B:
	case MODE_11GONLY:
	case MODE_11NG_HT20:
	case MODE_11NG_HT40:
	case MODE_11AC_VHT20_2G:
	case MODE_11AC_VHT40_2G:
	case MODE_11AC_VHT80_2G:
	default:
		band = IEEE80211_BAND_2GHZ;
	}

	return band;
}

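/* Map a legacy rate reported by firmware (in kbps) to the mac80211 rate
 * index for the given band; the four CCK entries are skipped on 5 GHz where
 * the rate table starts at 6 Mbps.
 */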
static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
{
	u8 rate_idx = 0;

	/* rate in Kbps */
	switch (rate) {
	case 1000:
		rate_idx = 0;
		break;
	case 2000:
		rate_idx = 1;
		break;
	case 5500:
		rate_idx = 2;
		break;
	case 11000:
		rate_idx = 3;
		break;
	case 6000:
		rate_idx = 4;
		break;
	case 9000:
		rate_idx = 5;
		break;
	case 12000:
		rate_idx = 6;
		break;
	case 18000:
		rate_idx = 7;
		break;
	case 24000:
		rate_idx = 8;
		break;
	case 36000:
		rate_idx = 9;
		break;
	case 48000:
		rate_idx = 10;
		break;
	case 54000:
		rate_idx = 11;
		break;
	default:
		break;
	}

	if (band == IEEE80211_BAND_5GHZ) {
		if (rate_idx > 3)
			/* Omit CCK rates */
			rate_idx -= 4;
		else
			rate_idx = 0;
	}

	return rate_idx;
}

static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_mgmt_rx_event_v1 *ev_v1;
	struct wmi_mgmt_rx_event_v2 *ev_v2;
	struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr;
	u32 rx_status;
	u32 channel;
	u32 phy_mode;
	u32 snr;
	u32 rate;
	u32 buf_len;
	u16 fc;
	int pull_len;

	if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
		ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
		ev_hdr = &ev_v2->hdr.v1;
		pull_len = sizeof(*ev_v2);
	} else {
		ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
		ev_hdr = &ev_v1->hdr;
		pull_len = sizeof(*ev_v1);
	}

	channel = __le32_to_cpu(ev_hdr->channel);
	buf_len = __le32_to_cpu(ev_hdr->buf_len);
	rx_status = __le32_to_cpu(ev_hdr->status);
	snr = __le32_to_cpu(ev_hdr->snr);
	phy_mode = __le32_to_cpu(ev_hdr->phy_mode);
	rate = __le32_to_cpu(ev_hdr->rate);

	memset(status, 0, sizeof(*status));

	ath10k_dbg(ATH10K_DBG_MGMT,
		   "event mgmt rx status %08x\n", rx_status);

	if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (rx_status & WMI_RX_STATUS_ERR_KEY_CACHE_MISS) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (rx_status & WMI_RX_STATUS_ERR_CRC)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;
	if (rx_status & WMI_RX_STATUS_ERR_MIC)
		status->flag |= RX_FLAG_MMIC_ERROR;

	status->band = phy_mode_to_band(phy_mode);
	status->freq = ieee80211_channel_to_frequency(channel, status->band);
	status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
	status->rate_idx = get_rate_idx(rate, status->band);

	skb_pull(skb, pull_len);

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = le16_to_cpu(hdr->frame_control);

	if (fc & IEEE80211_FCTL_PROTECTED) {
		status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
				RX_FLAG_MMIC_STRIPPED;
		hdr->frame_control = __cpu_to_le16(fc &
					~IEEE80211_FCTL_PROTECTED);
	}

	ath10k_dbg(ATH10K_DBG_MGMT,
		   "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
		   skb, skb->len,
		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);

	ath10k_dbg(ATH10K_DBG_MGMT,
		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
		   status->freq, status->band, status->signal,
		   status->rate_idx);

	/*
	 * packets from HTC come aligned to 4byte boundaries
	 * because they can originally come in along with a trailer
	 */
	skb_trim(skb, buf_len);

	ieee80211_rx(ar->hw, skb);
	return 0;
}

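/* Translate a channel center frequency into the flat index used by the
 * ar->survey array, counting channels across all registered bands in order.
 */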
static int freq_to_idx(struct ath10k *ar, int freq)
{
	struct ieee80211_supported_band *sband;
	int band, ch, idx = 0;

	for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
		sband = ar->hw->wiphy->bands[band];
		if (!sband)
			continue;

		for (ch = 0; ch < sband->n_channels; ch++, idx++)
			if (sband->channels[ch].center_freq == freq)
				goto exit;
	}

exit:
	return idx;
}

static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_chan_info_event *ev;
	struct survey_info *survey;
	u32 err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count;
	int idx;

	ev = (struct wmi_chan_info_event *)skb->data;

	err_code = __le32_to_cpu(ev->err_code);
	freq = __le32_to_cpu(ev->freq);
	cmd_flags = __le32_to_cpu(ev->cmd_flags);
	noise_floor = __le32_to_cpu(ev->noise_floor);
	rx_clear_count = __le32_to_cpu(ev->rx_clear_count);
	cycle_count = __le32_to_cpu(ev->cycle_count);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
		   err_code, freq, cmd_flags, noise_floor, rx_clear_count,
		   cycle_count);

	spin_lock_bh(&ar->data_lock);

	if (!ar->scan.in_progress) {
		ath10k_warn("chan info event without a scan request?\n");
		goto exit;
	}

	idx = freq_to_idx(ar, freq);
	if (idx >= ARRAY_SIZE(ar->survey)) {
		ath10k_warn("chan info: invalid frequency %d (idx %d out of bounds)\n",
			    freq, idx);
		goto exit;
	}

	if (cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
		/* During scanning chan info is reported twice for each
		 * visited channel. The reported cycle count is global
		 * and per-channel cycle count must be calculated */

		cycle_count -= ar->survey_last_cycle_count;
		rx_clear_count -= ar->survey_last_rx_clear_count;

		survey = &ar->survey[idx];
		survey->channel_time = WMI_CHAN_INFO_MSEC(cycle_count);
		survey->channel_time_rx = WMI_CHAN_INFO_MSEC(rx_clear_count);
		survey->noise = noise_floor;
		survey->filled = SURVEY_INFO_CHANNEL_TIME |
				 SURVEY_INFO_CHANNEL_TIME_RX |
				 SURVEY_INFO_NOISE_DBM;
	}

	ar->survey_last_rx_clear_count = rx_clear_count;
	ar->survey_last_cycle_count = cycle_count;

exit:
	spin_unlock_bh(&ar->data_lock);
}

static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
}

static void ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_MESG_EVENTID\n");
}

static void ath10k_wmi_event_update_stats(struct ath10k *ar,
					  struct sk_buff *skb)
{
	struct wmi_stats_event *ev = (struct wmi_stats_event *)skb->data;

	ath10k_dbg(ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");

	ath10k_debug_read_target_stats(ar, ev);
}

static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar,
					     struct sk_buff *skb)
{
	struct wmi_vdev_start_response_event *ev;

	ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");

	ev = (struct wmi_vdev_start_response_event *)skb->data;

	if (WARN_ON(__le32_to_cpu(ev->status)))
		return;

	complete(&ar->vdev_setup_done);
}

static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar,
					  struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
	complete(&ar->vdev_setup_done);
}

static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar,
					      struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PEER_STA_KICKOUT_EVENTID\n");
}

/*
 * FIXME
 *
 * We don't report to mac80211 sleep state of connected
 * stations. Due to this mac80211 can't fill in TIM IE
 * correctly.
 *
 * I know of no way of getting nullfunc frames that contain
 * sleep transition from connected stations - these do not
 * seem to be sent from the target to the host. There also
 * doesn't seem to be a dedicated event for that. So the
 * only way left to do this would be to read tim_bitmap
 * during SWBA.
 *
 * We could probably try using tim_bitmap from SWBA to tell
 * mac80211 which stations are asleep and which are not. The
 * problem here is calling mac80211 functions so many times
 * could take too long and make us miss the time to submit
 * the beacon to the target.
 *
 * So as a workaround we try to extend the TIM IE if there
 * is unicast buffered for stations with aid > 7 and fill it
 * in ourselves.
 */
static void ath10k_wmi_update_tim(struct ath10k *ar,
				  struct ath10k_vif *arvif,
				  struct sk_buff *bcn,
				  struct wmi_bcn_info *bcn_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
	struct ieee80211_tim_ie *tim;
	u8 *ies, *ie;
	u8 ie_len, pvm_len;

	/* if next SWBA has no tim_changed the tim_bitmap is garbage.
	 * we must copy the bitmap upon change and reuse it later */
	if (__le32_to_cpu(bcn_info->tim_info.tim_changed)) {
		int i;

		BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) !=
			     sizeof(bcn_info->tim_info.tim_bitmap));

		for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) {
			__le32 t = bcn_info->tim_info.tim_bitmap[i / 4];
			u32 v = __le32_to_cpu(t);
			arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
		}

		/* FW reports either length 0 or 16
		 * so we calculate this on our own */
		arvif->u.ap.tim_len = 0;
		for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++)
			if (arvif->u.ap.tim_bitmap[i])
				arvif->u.ap.tim_len = i;

		arvif->u.ap.tim_len++;
	}

	ies = bcn->data;
	ies += ieee80211_hdrlen(hdr->frame_control);
	ies += 12; /* fixed parameters */

	ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
				    (u8 *)skb_tail_pointer(bcn) - ies);
	if (!ie) {
		if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
			ath10k_warn("no tim ie found;\n");
		return;
	}

	tim = (void *)ie + 2;
	ie_len = ie[1];
	pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */

	if (pvm_len < arvif->u.ap.tim_len) {
		int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len;
		int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
		void *next_ie = ie + 2 + ie_len;

		if (skb_put(bcn, expand_size)) {
			memmove(next_ie + expand_size, next_ie, move_size);

			ie[1] += expand_size;
			ie_len += expand_size;
			pvm_len += expand_size;
		} else {
			ath10k_warn("tim expansion failed\n");
		}
	}

	if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) {
		ath10k_warn("tim pvm length is too great (%d)\n", pvm_len);
		return;
	}

	tim->bitmap_ctrl = !!__le32_to_cpu(bcn_info->tim_info.tim_mcast);
	memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);

	ath10k_dbg(ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
		   tim->dtim_count, tim->dtim_period,
		   tim->bitmap_ctrl, pvm_len);
}

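/* Build the P2P vendor IE carrying the Notice of Absence attribute from the
 * NoA info the firmware reports in the SWBA event. The buffer length passed
 * in must match what ath10k_p2p_calc_noa_ie_len() computes for the same
 * descriptor set.
 */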
static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len,
				   struct wmi_p2p_noa_info *noa)
{
	struct ieee80211_p2p_noa_attr *noa_attr;
	u8 ctwindow_oppps = noa->ctwindow_oppps;
	u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
	bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
	__le16 *noa_attr_len;
	u16 attr_len;
	u8 noa_descriptors = noa->num_descriptors;
	int i;

	/* P2P IE */
	data[0] = WLAN_EID_VENDOR_SPECIFIC;
	data[1] = len - 2;
	data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
	data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
	data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
	data[5] = WLAN_OUI_TYPE_WFA_P2P;

	/* NOA ATTR */
	data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
	noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
	noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];

	noa_attr->index = noa->index;
	noa_attr->oppps_ctwindow = ctwindow;
	if (oppps)
		noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;

	for (i = 0; i < noa_descriptors; i++) {
		noa_attr->desc[i].count =
			__le32_to_cpu(noa->descriptors[i].type_count);
		noa_attr->desc[i].duration = noa->descriptors[i].duration;
		noa_attr->desc[i].interval = noa->descriptors[i].interval;
		noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
	}

	attr_len = 2; /* index + oppps_ctwindow */
	attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
	*noa_attr_len = __cpu_to_le16(attr_len);
}

static u32 ath10k_p2p_calc_noa_ie_len(struct wmi_p2p_noa_info *noa)
{
	u32 len = 0;
	u8 noa_descriptors = noa->num_descriptors;
	u8 opp_ps_info = noa->ctwindow_oppps;
	bool opps_enabled = !!(opp_ps_info & WMI_P2P_OPPPS_ENABLE_BIT);

	if (!noa_descriptors && !opps_enabled)
		return len;

	len += 1 + 1 + 4; /* EID + len + OUI */
	len += 1 + 2; /* noa attr + attr len */
	len += 1 + 1; /* index + oppps_ctwindow */
	len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);

	return len;
}

static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
				  struct sk_buff *bcn,
				  struct wmi_bcn_info *bcn_info)
{
	struct wmi_p2p_noa_info *noa = &bcn_info->p2p_noa_info;
	u8 *new_data, *old_data = arvif->u.ap.noa_data;
	u32 new_len;

	if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
		return;

	ath10k_dbg(ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
	if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) {
		new_len = ath10k_p2p_calc_noa_ie_len(noa);
		if (!new_len)
			goto cleanup;

		new_data = kmalloc(new_len, GFP_ATOMIC);
		if (!new_data)
			goto cleanup;

		ath10k_p2p_fill_noa_ie(new_data, new_len, noa);

		spin_lock_bh(&ar->data_lock);
		arvif->u.ap.noa_data = new_data;
		arvif->u.ap.noa_len = new_len;
		spin_unlock_bh(&ar->data_lock);
		kfree(old_data);
	}

	if (arvif->u.ap.noa_data)
		if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
			memcpy(skb_put(bcn, arvif->u.ap.noa_len),
			       arvif->u.ap.noa_data,
			       arvif->u.ap.noa_len);
	return;

cleanup:
	spin_lock_bh(&ar->data_lock);
	arvif->u.ap.noa_data = NULL;
	arvif->u.ap.noa_len = 0;
	spin_unlock_bh(&ar->data_lock);
	kfree(old_data);
}

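/* SWBA (software beacon alert) handler: for every vdev flagged in the event,
 * fetch a fresh beacon from mac80211, patch in the TIM and P2P NoA contents
 * reported by the firmware, and queue it for transmission.
 */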
static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_host_swba_event *ev;
	u32 map;
	int i = -1;
	struct wmi_bcn_info *bcn_info;
	struct ath10k_vif *arvif;
	struct sk_buff *bcn;
	int vdev_id = 0;

	ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");

	ev = (struct wmi_host_swba_event *)skb->data;
	map = __le32_to_cpu(ev->vdev_map);

	ath10k_dbg(ATH10K_DBG_MGMT, "host swba:\n"
		   "-vdev map 0x%x\n",
		   ev->vdev_map);

	for (; map; map >>= 1, vdev_id++) {
		if (!(map & 0x1))
			continue;

		i++;

		if (i >= WMI_MAX_AP_VDEV) {
			ath10k_warn("swba has corrupted vdev map\n");
			break;
		}

		bcn_info = &ev->bcn_info[i];

		ath10k_dbg(ATH10K_DBG_MGMT,
			   "-bcn_info[%d]:\n"
			   "--tim_len %d\n"
			   "--tim_mcast %d\n"
			   "--tim_changed %d\n"
			   "--tim_num_ps_pending %d\n"
			   "--tim_bitmap 0x%08x%08x%08x%08x\n",
			   i,
			   __le32_to_cpu(bcn_info->tim_info.tim_len),
			   __le32_to_cpu(bcn_info->tim_info.tim_mcast),
			   __le32_to_cpu(bcn_info->tim_info.tim_changed),
			   __le32_to_cpu(bcn_info->tim_info.tim_num_ps_pending),
			   __le32_to_cpu(bcn_info->tim_info.tim_bitmap[3]),
			   __le32_to_cpu(bcn_info->tim_info.tim_bitmap[2]),
			   __le32_to_cpu(bcn_info->tim_info.tim_bitmap[1]),
			   __le32_to_cpu(bcn_info->tim_info.tim_bitmap[0]));

		arvif = ath10k_get_arvif(ar, vdev_id);
		if (arvif == NULL) {
			ath10k_warn("no vif for vdev_id %d found\n", vdev_id);
			continue;
		}

		bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
		if (!bcn) {
			ath10k_warn("could not get mac80211 beacon\n");
			continue;
		}

		ath10k_tx_h_seq_no(bcn);
		ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info);
		ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);

		spin_lock_bh(&ar->data_lock);
		if (arvif->beacon) {
			ath10k_warn("SWBA overrun on vdev %d\n",
				    arvif->vdev_id);
			dev_kfree_skb_any(arvif->beacon);
		}

		arvif->beacon = bcn;

		ath10k_wmi_tx_beacon_nowait(arvif);
		spin_unlock_bh(&ar->data_lock);
	}
}

static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar,
					       struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
}

static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PHYERR_EVENTID\n");
}

static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
}

static void ath10k_wmi_event_profile_match(struct ath10k *ar,
					   struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
}

static void ath10k_wmi_event_debug_print(struct ath10k *ar,
					 struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_PRINT_EVENTID\n");
}

static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
}

static void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar,
					       struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
}

static void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
						    struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
}

static void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
						     struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
}

static void ath10k_wmi_event_rtt_error_report(struct ath10k *ar,
					       struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
}

static void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar,
					     struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
}

static void ath10k_wmi_event_dcs_interference(struct ath10k *ar,
					      struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
}

static void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar,
					     struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n");
}

static void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar,
					   struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
}

static void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar,
						 struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
}

static void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar,
					    struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
}

static void ath10k_wmi_event_delba_complete(struct ath10k *ar,
					    struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
}

static void ath10k_wmi_event_addba_complete(struct ath10k *ar,
					    struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
}

static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
							struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
}

static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
					      struct sk_buff *skb)
{
	struct wmi_service_ready_event *ev = (void *)skb->data;

	if (skb->len < sizeof(*ev)) {
		ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
			    skb->len, sizeof(*ev));
		return;
	}

	ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power);
	ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power);
	ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info);
	ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info);
	ar->fw_version_major =
		(__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24;
	ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff);
	ar->fw_version_release =
		(__le32_to_cpu(ev->sw_version_1) & 0xffff0000) >> 16;
	ar->fw_version_build = (__le32_to_cpu(ev->sw_version_1) & 0x0000ffff);
	ar->phy_capability = __le32_to_cpu(ev->phy_capability);
	ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);

	if (ar->fw_version_build > 636)
		set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);

	if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
		ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",
			    ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
		ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
	}

	ar->ath_common.regulatory.current_rd =
		__le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);

	ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap,
				      sizeof(ev->wmi_service_bitmap));

	if (strlen(ar->hw->wiphy->fw_version) == 0) {
		snprintf(ar->hw->wiphy->fw_version,
			 sizeof(ar->hw->wiphy->fw_version),
			 "%u.%u.%u.%u",
			 ar->fw_version_major,
			 ar->fw_version_minor,
			 ar->fw_version_release,
			 ar->fw_version_build);
	}

	/* FIXME: it probably should be better to support this */
	if (__le32_to_cpu(ev->num_mem_reqs) > 0) {
		ath10k_warn("target requested %d memory chunks; ignoring\n",
			    __le32_to_cpu(ev->num_mem_reqs));
	}

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
		   __le32_to_cpu(ev->sw_version),
		   __le32_to_cpu(ev->sw_version_1),
		   __le32_to_cpu(ev->abi_version),
		   __le32_to_cpu(ev->phy_capability),
		   __le32_to_cpu(ev->ht_cap_info),
		   __le32_to_cpu(ev->vht_cap_info),
		   __le32_to_cpu(ev->vht_supp_mcs),
		   __le32_to_cpu(ev->sys_cap_info),
		   __le32_to_cpu(ev->num_mem_reqs),
		   __le32_to_cpu(ev->num_rf_chains));

	complete(&ar->wmi.service_ready);
}

static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data;

	if (WARN_ON(skb->len < sizeof(*ev)))
		return -EINVAL;

	memcpy(ar->mac_addr, ev->mac_addr.addr, ETH_ALEN);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d\n",
		   __le32_to_cpu(ev->sw_version),
		   __le32_to_cpu(ev->abi_version),
		   ev->mac_addr.addr,
		   __le32_to_cpu(ev->status));

	complete(&ar->wmi.unified_ready);
	return 0;
}

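/* Dispatch a received WMI event for "main" firmware: decode the event id
 * from the WMI command header and hand the payload to the matching handler.
 * WMI_MGMT_RX_EVENTID passes skb ownership on to mac80211, so it must not be
 * freed here.
 */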
static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_event_id id;
	u16 len;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);

	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		return;

	len = skb->len;

	trace_ath10k_wmi_event(id, skb->data, skb->len);

	switch (id) {
	case WMI_MGMT_RX_EVENTID:
		ath10k_wmi_event_mgmt_rx(ar, skb);
		/* mgmt_rx() owns the skb now! */
		return;
	case WMI_SCAN_EVENTID:
		ath10k_wmi_event_scan(ar, skb);
		break;
	case WMI_CHAN_INFO_EVENTID:
		ath10k_wmi_event_chan_info(ar, skb);
		break;
	case WMI_ECHO_EVENTID:
		ath10k_wmi_event_echo(ar, skb);
		break;
	case WMI_DEBUG_MESG_EVENTID:
		ath10k_wmi_event_debug_mesg(ar, skb);
		break;
	case WMI_UPDATE_STATS_EVENTID:
		ath10k_wmi_event_update_stats(ar, skb);
		break;
	case WMI_VDEV_START_RESP_EVENTID:
		ath10k_wmi_event_vdev_start_resp(ar, skb);
		break;
	case WMI_VDEV_STOPPED_EVENTID:
		ath10k_wmi_event_vdev_stopped(ar, skb);
		break;
	case WMI_PEER_STA_KICKOUT_EVENTID:
		ath10k_wmi_event_peer_sta_kickout(ar, skb);
		break;
	case WMI_HOST_SWBA_EVENTID:
		ath10k_wmi_event_host_swba(ar, skb);
		break;
	case WMI_TBTTOFFSET_UPDATE_EVENTID:
		ath10k_wmi_event_tbttoffset_update(ar, skb);
		break;
	case WMI_PHYERR_EVENTID:
		ath10k_wmi_event_phyerr(ar, skb);
		break;
	case WMI_ROAM_EVENTID:
		ath10k_wmi_event_roam(ar, skb);
		break;
	case WMI_PROFILE_MATCH:
		ath10k_wmi_event_profile_match(ar, skb);
		break;
	case WMI_DEBUG_PRINT_EVENTID:
		ath10k_wmi_event_debug_print(ar, skb);
		break;
	case WMI_PDEV_QVIT_EVENTID:
		ath10k_wmi_event_pdev_qvit(ar, skb);
		break;
	case WMI_WLAN_PROFILE_DATA_EVENTID:
		ath10k_wmi_event_wlan_profile_data(ar, skb);
		break;
	case WMI_RTT_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_rtt_measurement_report(ar, skb);
		break;
	case WMI_TSF_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_tsf_measurement_report(ar, skb);
		break;
	case WMI_RTT_ERROR_REPORT_EVENTID:
		ath10k_wmi_event_rtt_error_report(ar, skb);
		break;
	case WMI_WOW_WAKEUP_HOST_EVENTID:
		ath10k_wmi_event_wow_wakeup_host(ar, skb);
		break;
	case WMI_DCS_INTERFERENCE_EVENTID:
		ath10k_wmi_event_dcs_interference(ar, skb);
		break;
	case WMI_PDEV_TPC_CONFIG_EVENTID:
		ath10k_wmi_event_pdev_tpc_config(ar, skb);
		break;
	case WMI_PDEV_FTM_INTG_EVENTID:
		ath10k_wmi_event_pdev_ftm_intg(ar, skb);
		break;
	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
		ath10k_wmi_event_gtk_offload_status(ar, skb);
		break;
	case WMI_GTK_REKEY_FAIL_EVENTID:
		ath10k_wmi_event_gtk_rekey_fail(ar, skb);
		break;
	case WMI_TX_DELBA_COMPLETE_EVENTID:
		ath10k_wmi_event_delba_complete(ar, skb);
		break;
	case WMI_TX_ADDBA_COMPLETE_EVENTID:
		ath10k_wmi_event_addba_complete(ar, skb);
		break;
	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
		ath10k_wmi_event_vdev_install_key_complete(ar, skb);
		break;
	case WMI_SERVICE_READY_EVENTID:
		ath10k_wmi_service_ready_event_rx(ar, skb);
		break;
	case WMI_READY_EVENTID:
		ath10k_wmi_ready_event_rx(ar, skb);
		break;
	default:
		ath10k_warn("Unknown eventid: %d\n", id);
		break;
	}

	dev_kfree_skb(skb);
}

static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
		ath10k_warn("Firmware 10.X is not yet supported\n");
	else
		ath10k_wmi_main_process_rx(ar, skb);
}

/* WMI Initialization functions */
int ath10k_wmi_attach(struct ath10k *ar)
{
	int ret = 0;

	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
		ath10k_warn("Firmware 10.X is not yet supported\n");
		ret = -ENOTSUPP;
	} else {
		ar->wmi.cmd = &wmi_cmd_map;
	}

	init_completion(&ar->wmi.service_ready);
	init_completion(&ar->wmi.unified_ready);
	init_waitqueue_head(&ar->wmi.tx_credits_wq);

	return ret;
}

void ath10k_wmi_detach(struct ath10k *ar)
{
}

int ath10k_wmi_connect_htc_service(struct ath10k *ar)
{
	int status;
	struct ath10k_htc_svc_conn_req conn_req;
	struct ath10k_htc_svc_conn_resp conn_resp;

	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));

	/* these fields are the same for all service endpoints */
	conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
	conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits;

	/* connect to control service */
	conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;

	status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
	if (status) {
		ath10k_warn("failed to connect to WMI CONTROL service status: %d\n",
			    status);
		return status;
	}

	ar->wmi.eid = conn_resp.eid;
	return 0;
}

int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
				  u16 rd5g, u16 ctl2g, u16 ctl5g)
{
	struct wmi_pdev_set_regdomain_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
	cmd->reg_domain = __cpu_to_le32(rd);
	cmd->reg_domain_2G = __cpu_to_le32(rd2g);
	cmd->reg_domain_5G = __cpu_to_le32(rd5g);
	cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
	cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
		   rd, rd2g, rd5g, ctl2g, ctl5g);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
}

int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
				const struct wmi_channel_arg *arg)
{
	struct wmi_set_channel_cmd *cmd;
	struct sk_buff *skb;

	if (arg->passive)
		return -EINVAL;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_set_channel_cmd *)skb->data;
	cmd->chan.mhz = __cpu_to_le32(arg->freq);
	cmd->chan.band_center_freq1 = __cpu_to_le32(arg->freq);
	cmd->chan.mode = arg->mode;
	cmd->chan.min_power = arg->min_power;
	cmd->chan.max_power = arg->max_power;
	cmd->chan.reg_power = arg->max_reg_power;
	cmd->chan.reg_classid = arg->reg_class_id;
	cmd->chan.antenna_max = arg->max_antenna_gain;

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi set channel mode %d freq %d\n",
		   arg->mode, arg->freq);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_channel_cmdid);
}

int ath10k_wmi_pdev_suspend_target(struct ath10k *ar)
{
	struct wmi_pdev_suspend_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
	cmd->suspend_opt = WMI_PDEV_SUSPEND;

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}

int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(0);
	if (skb == NULL)
		return -ENOMEM;

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}

int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
			      u32 value)
{
	struct wmi_pdev_set_param_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
	cmd->param_id = __cpu_to_le32(id);
	cmd->param_value = __cpu_to_le32(value);

	ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
		   id, value);
	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}

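/* Send WMI_INIT_CMDID with the host resource configuration (number of vdevs,
 * peers, TIDs, decap mode, offload limits, etc.), all taken from the TARGET_*
 * defaults. No host memory chunks are handed to the firmware here.
 */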
int ath10k_wmi_cmd_init(struct ath10k *ar)
{
	struct wmi_init_cmd *cmd;
	struct sk_buff *buf;
	struct wmi_resource_config config = {};
	u32 val;

	config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
	config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS);
	config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS);

	config.num_offload_reorder_bufs =
		__cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS);

	config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS);
	config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS);
	config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT);
	config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK);
	config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK);
	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
	config.rx_decap_mode = __cpu_to_le32(TARGET_RX_DECAP_MODE);

	config.scan_max_pending_reqs =
		__cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);

	config.bmiss_offload_max_vdev =
		__cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV);

	config.roam_offload_max_vdev =
		__cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV);

	config.roam_offload_max_ap_profiles =
		__cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES);

	config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS);
	config.num_mcast_table_elems =
		__cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS);

	config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE);
	config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE);
	config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES);
	config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE);
	config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM);

	val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);

	config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG);

	config.gtk_offload_max_vdev =
		__cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV);

	config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
	config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);

	buf = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!buf)
		return -ENOMEM;

	cmd = (struct wmi_init_cmd *)buf->data;
	cmd->num_host_mem_chunks = 0;
	memcpy(&cmd->resource_config, &config, sizeof(config));

	ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n");
	return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
}

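/* Compute the total buffer size for a start-scan command: the fixed command
 * struct plus one optional TLV each for the channel list, SSID list, BSSID
 * list and IE blob, with the IE data rounded up to a 4-byte boundary.
 */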
static int ath10k_wmi_start_scan_calc_len(const struct wmi_start_scan_arg *arg)
{
	int len;

	len = sizeof(struct wmi_start_scan_cmd);

	if (arg->ie_len) {
		if (!arg->ie)
			return -EINVAL;
		if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
			return -EINVAL;

		len += sizeof(struct wmi_ie_data);
		len += roundup(arg->ie_len, 4);
	}

	if (arg->n_channels) {
		if (!arg->channels)
			return -EINVAL;
		if (arg->n_channels > ARRAY_SIZE(arg->channels))
			return -EINVAL;

		len += sizeof(struct wmi_chan_list);
		len += sizeof(__le32) * arg->n_channels;
	}

	if (arg->n_ssids) {
		if (!arg->ssids)
			return -EINVAL;
		if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
			return -EINVAL;

		len += sizeof(struct wmi_ssid_list);
		len += sizeof(struct wmi_ssid) * arg->n_ssids;
	}

	if (arg->n_bssids) {
		if (!arg->bssids)
			return -EINVAL;
		if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
			return -EINVAL;

		len += sizeof(struct wmi_bssid_list);
		len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
	}

	return len;
}

int ath10k_wmi_start_scan(struct ath10k *ar,
			  const struct wmi_start_scan_arg *arg)
{
	struct wmi_start_scan_cmd *cmd;
	struct sk_buff *skb;
	struct wmi_ie_data *ie;
	struct wmi_chan_list *channels;
	struct wmi_ssid_list *ssids;
	struct wmi_bssid_list *bssids;
	u32 scan_id;
	u32 scan_req_id;
	int off;
	int len = 0;
	int i;

	len = ath10k_wmi_start_scan_calc_len(arg);
	if (len < 0)
		return len; /* len contains error code here */

	skb = ath10k_wmi_alloc_skb(len);
	if (!skb)
		return -ENOMEM;

	scan_id = WMI_HOST_SCAN_REQ_ID_PREFIX;
	scan_id |= arg->scan_id;

	scan_req_id = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
	scan_req_id |= arg->scan_req_id;

	cmd = (struct wmi_start_scan_cmd *)skb->data;
	cmd->scan_id = __cpu_to_le32(scan_id);
	cmd->scan_req_id = __cpu_to_le32(scan_req_id);
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
	cmd->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
	cmd->dwell_time_active = __cpu_to_le32(arg->dwell_time_active);
	cmd->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
	cmd->min_rest_time = __cpu_to_le32(arg->min_rest_time);
	cmd->max_rest_time = __cpu_to_le32(arg->max_rest_time);
	cmd->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time);
	cmd->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
	cmd->idle_time = __cpu_to_le32(arg->idle_time);
	cmd->max_scan_time = __cpu_to_le32(arg->max_scan_time);
	cmd->probe_delay = __cpu_to_le32(arg->probe_delay);
	cmd->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);

	/* TLV list starts after fields included in the struct */
	off = sizeof(*cmd);

	if (arg->n_channels) {
		channels = (void *)skb->data + off;
		channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
		channels->num_chan = __cpu_to_le32(arg->n_channels);

		for (i = 0; i < arg->n_channels; i++)
			channels->channel_list[i] =
				__cpu_to_le32(arg->channels[i]);

		off += sizeof(*channels);
		off += sizeof(__le32) * arg->n_channels;
	}

	if (arg->n_ssids) {
		ssids = (void *)skb->data + off;
		ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
		ssids->num_ssids = __cpu_to_le32(arg->n_ssids);

		for (i = 0; i < arg->n_ssids; i++) {
			ssids->ssids[i].ssid_len =
				__cpu_to_le32(arg->ssids[i].len);
			memcpy(&ssids->ssids[i].ssid,
			       arg->ssids[i].ssid,
			       arg->ssids[i].len);
		}

		off += sizeof(*ssids);
		off += sizeof(struct wmi_ssid) * arg->n_ssids;
	}

	if (arg->n_bssids) {
		bssids = (void *)skb->data + off;
		bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
		bssids->num_bssid = __cpu_to_le32(arg->n_bssids);

		for (i = 0; i < arg->n_bssids; i++)
			memcpy(&bssids->bssid_list[i],
			       arg->bssids[i].bssid,
			       ETH_ALEN);

		off += sizeof(*bssids);
		off += sizeof(struct wmi_mac_addr) * arg->n_bssids;
	}

	if (arg->ie_len) {
		ie = (void *)skb->data + off;
		ie->tag = __cpu_to_le32(WMI_IE_TAG);
		ie->ie_len = __cpu_to_le32(arg->ie_len);
		memcpy(ie->ie_data, arg->ie, arg->ie_len);

		off += sizeof(*ie);
		off += roundup(arg->ie_len, 4);
	}

	if (off != skb->len) {
		dev_kfree_skb(skb);
		return -EINVAL;
	}

	ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n");
	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

1690void ath10k_wmi_start_scan_init(struct ath10k *ar,
1691 struct wmi_start_scan_arg *arg)
1692{
1693 /* setup commonly used values */
1694 arg->scan_req_id = 1;
1695 arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
1696 arg->dwell_time_active = 50;
1697 arg->dwell_time_passive = 150;
1698 arg->min_rest_time = 50;
1699 arg->max_rest_time = 500;
1700 arg->repeat_probe_time = 0;
1701 arg->probe_spacing_time = 0;
1702 arg->idle_time = 0;
1703 arg->max_scan_time = 5000;
1704 arg->probe_delay = 5;
1705 arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
1706 | WMI_SCAN_EVENT_COMPLETED
1707 | WMI_SCAN_EVENT_BSS_CHANNEL
1708 | WMI_SCAN_EVENT_FOREIGN_CHANNEL
1709 | WMI_SCAN_EVENT_DEQUEUED;
1710 arg->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
1711 arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
1712 arg->n_bssids = 1;
1713 arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
1714}
1715
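/* Build and send the stop-scan command.  The requestor and scan ids are
 * limited to 12 bits because the host prefixes are OR'ed into the upper
 * bits before the command goes out.
 */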
1716int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
1717{
1718 struct wmi_stop_scan_cmd *cmd;
1719 struct sk_buff *skb;
1720 u32 scan_id;
1721 u32 req_id;
1722
1723 if (arg->req_id > 0xFFF)
1724 return -EINVAL;
1725 if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
1726 return -EINVAL;
1727
1728 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1729 if (!skb)
1730 return -ENOMEM;
1731
1732 scan_id = arg->u.scan_id;
1733 scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
1734
1735 req_id = arg->req_id;
1736 req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
1737
1738 cmd = (struct wmi_stop_scan_cmd *)skb->data;
1739 cmd->req_type = __cpu_to_le32(arg->req_type);
1740 cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
1741 cmd->scan_id = __cpu_to_le32(scan_id);
1742 cmd->scan_req_id = __cpu_to_le32(req_id);
1743
1744 ath10k_dbg(ATH10K_DBG_WMI,
1745 "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
1746 arg->req_id, arg->req_type, arg->u.scan_id);
Bartosz Markowskice428702013-09-26 17:47:05 +02001747 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001748}
1749
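/* Create a virtual device (vdev) of the given type/subtype in the
 * firmware and bind it to the supplied MAC address.
 */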
1750int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
1751 enum wmi_vdev_type type,
1752 enum wmi_vdev_subtype subtype,
1753 const u8 macaddr[ETH_ALEN])
1754{
1755 struct wmi_vdev_create_cmd *cmd;
1756 struct sk_buff *skb;
1757
1758 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1759 if (!skb)
1760 return -ENOMEM;
1761
1762 cmd = (struct wmi_vdev_create_cmd *)skb->data;
1763 cmd->vdev_id = __cpu_to_le32(vdev_id);
1764 cmd->vdev_type = __cpu_to_le32(type);
1765 cmd->vdev_subtype = __cpu_to_le32(subtype);
1766 memcpy(cmd->vdev_macaddr.addr, macaddr, ETH_ALEN);
1767
1768 ath10k_dbg(ATH10K_DBG_WMI,
1769 "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
1770 vdev_id, type, subtype, macaddr);
1771
Bartosz Markowskice428702013-09-26 17:47:05 +02001772 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001773}
1774
1775int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
1776{
1777 struct wmi_vdev_delete_cmd *cmd;
1778 struct sk_buff *skb;
1779
1780 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1781 if (!skb)
1782 return -ENOMEM;
1783
1784 cmd = (struct wmi_vdev_delete_cmd *)skb->data;
1785 cmd->vdev_id = __cpu_to_le32(vdev_id);
1786
1787 ath10k_dbg(ATH10K_DBG_WMI,
1788 "WMI vdev delete id %d\n", vdev_id);
1789
Bartosz Markowskice428702013-09-26 17:47:05 +02001790 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001791}
1792
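/* Common worker for vdev start and restart: both commands use the same
 * wmi_vdev_start_request_cmd layout and differ only in the command id.
 * The SSID is optional; hidden_ssid and pmf_enabled are translated into
 * the corresponding start flags.
 */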
1793static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
1794 const struct wmi_vdev_start_request_arg *arg,
Bartosz Markowskice428702013-09-26 17:47:05 +02001795 u32 cmd_id)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001796{
1797 struct wmi_vdev_start_request_cmd *cmd;
1798 struct sk_buff *skb;
1799 const char *cmdname;
1800 u32 flags = 0;
1801
Bartosz Markowskice428702013-09-26 17:47:05 +02001802 if (cmd_id != ar->wmi.cmd->vdev_start_request_cmdid &&
1803 cmd_id != ar->wmi.cmd->vdev_restart_request_cmdid)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001804 return -EINVAL;
1805 if (WARN_ON(arg->ssid && arg->ssid_len == 0))
1806 return -EINVAL;
1807 if (WARN_ON(arg->hidden_ssid && !arg->ssid))
1808 return -EINVAL;
1809 if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
1810 return -EINVAL;
1811
Bartosz Markowskice428702013-09-26 17:47:05 +02001812 if (cmd_id == ar->wmi.cmd->vdev_start_request_cmdid)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001813 cmdname = "start";
Bartosz Markowskice428702013-09-26 17:47:05 +02001814 else if (cmd_id == ar->wmi.cmd->vdev_restart_request_cmdid)
Kalle Valo5e3dd152013-06-12 20:52:10 +03001815 cmdname = "restart";
1816 else
1817 return -EINVAL; /* should not happen, we already check cmd_id */
1818
1819 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1820 if (!skb)
1821 return -ENOMEM;
1822
1823 if (arg->hidden_ssid)
1824 flags |= WMI_VDEV_START_HIDDEN_SSID;
1825 if (arg->pmf_enabled)
1826 flags |= WMI_VDEV_START_PMF_ENABLED;
1827
1828 cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
1829 cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
1830 cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);
1831 cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval);
1832 cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
1833 cmd->flags = __cpu_to_le32(flags);
1834 cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
1835 cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);
1836
1837 if (arg->ssid) {
1838 cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
1839 memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
1840 }
1841
1842 cmd->chan.mhz = __cpu_to_le32(arg->channel.freq);
1843
1844 cmd->chan.band_center_freq1 =
1845 __cpu_to_le32(arg->channel.band_center_freq1);
1846
1847 cmd->chan.mode = arg->channel.mode;
1848 cmd->chan.min_power = arg->channel.min_power;
1849 cmd->chan.max_power = arg->channel.max_power;
1850 cmd->chan.reg_power = arg->channel.max_reg_power;
1851 cmd->chan.reg_classid = arg->channel.reg_class_id;
1852 cmd->chan.antenna_max = arg->channel.max_antenna_gain;
1853
1854 ath10k_dbg(ATH10K_DBG_WMI,
1855 "wmi vdev %s id 0x%x freq %d, mode %d, ch_flags: 0x%0X,"
1856 "max_power: %d\n", cmdname, arg->vdev_id, arg->channel.freq,
1857 arg->channel.mode, flags, arg->channel.max_power);
1858
1859 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1860}
1861
1862int ath10k_wmi_vdev_start(struct ath10k *ar,
1863 const struct wmi_vdev_start_request_arg *arg)
1864{
Bartosz Markowskice428702013-09-26 17:47:05 +02001865 u32 cmd_id = ar->wmi.cmd->vdev_start_request_cmdid;
1866
1867 return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001868}
1869
1870int ath10k_wmi_vdev_restart(struct ath10k *ar,
1871 const struct wmi_vdev_start_request_arg *arg)
1872{
Bartosz Markowskice428702013-09-26 17:47:05 +02001873 u32 cmd_id = ar->wmi.cmd->vdev_restart_request_cmdid;
1874
1875 return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001876}
1877
1878int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
1879{
1880 struct wmi_vdev_stop_cmd *cmd;
1881 struct sk_buff *skb;
1882
1883 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1884 if (!skb)
1885 return -ENOMEM;
1886
1887 cmd = (struct wmi_vdev_stop_cmd *)skb->data;
1888 cmd->vdev_id = __cpu_to_le32(vdev_id);
1889
1890 ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
1891
Bartosz Markowskice428702013-09-26 17:47:05 +02001892 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001893}
1894
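/* Bring a started vdev up, i.e. associate it with the given AID and
 * BSSID so the firmware can start passing traffic on it.
 */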
1895int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
1896{
1897 struct wmi_vdev_up_cmd *cmd;
1898 struct sk_buff *skb;
1899
1900 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1901 if (!skb)
1902 return -ENOMEM;
1903
1904 cmd = (struct wmi_vdev_up_cmd *)skb->data;
1905 cmd->vdev_id = __cpu_to_le32(vdev_id);
1906 cmd->vdev_assoc_id = __cpu_to_le32(aid);
1907 memcpy(&cmd->vdev_bssid.addr, bssid, 6);
1908
1909 ath10k_dbg(ATH10K_DBG_WMI,
1910 "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
1911 vdev_id, aid, bssid);
1912
Bartosz Markowskice428702013-09-26 17:47:05 +02001913 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001914}
1915
1916int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
1917{
1918 struct wmi_vdev_down_cmd *cmd;
1919 struct sk_buff *skb;
1920
1921 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1922 if (!skb)
1923 return -ENOMEM;
1924
1925 cmd = (struct wmi_vdev_down_cmd *)skb->data;
1926 cmd->vdev_id = __cpu_to_le32(vdev_id);
1927
1928 ath10k_dbg(ATH10K_DBG_WMI,
1929 "wmi mgmt vdev down id 0x%x\n", vdev_id);
1930
Bartosz Markowskice428702013-09-26 17:47:05 +02001931 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001932}
1933
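/* Set a single per-vdev firmware parameter (see enum wmi_vdev_param). */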
1934int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
1935 enum wmi_vdev_param param_id, u32 param_value)
1936{
1937 struct wmi_vdev_set_param_cmd *cmd;
1938 struct sk_buff *skb;
1939
1940 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1941 if (!skb)
1942 return -ENOMEM;
1943
1944 cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
1945 cmd->vdev_id = __cpu_to_le32(vdev_id);
1946 cmd->param_id = __cpu_to_le32(param_id);
1947 cmd->param_value = __cpu_to_le32(param_value);
1948
1949 ath10k_dbg(ATH10K_DBG_WMI,
1950 "wmi vdev id 0x%x set param %d value %d\n",
1951 vdev_id, param_id, param_value);
1952
Bartosz Markowskice428702013-09-26 17:47:05 +02001953 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001954}
1955
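/* Install (or clear) a cipher key on a vdev.  The variable-length key
 * material is appended right after the fixed-size command, hence the
 * skb is sized as sizeof(*cmd) + key_len.  A NULL key_data is only
 * valid together with WMI_CIPHER_NONE, and vice versa.
 */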
1956int ath10k_wmi_vdev_install_key(struct ath10k *ar,
1957 const struct wmi_vdev_install_key_arg *arg)
1958{
1959 struct wmi_vdev_install_key_cmd *cmd;
1960 struct sk_buff *skb;
1961
1962 if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
1963 return -EINVAL;
1964 if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
1965 return -EINVAL;
1966
1967 skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->key_len);
1968 if (!skb)
1969 return -ENOMEM;
1970
1971 cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
1972 cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
1973 cmd->key_idx = __cpu_to_le32(arg->key_idx);
1974 cmd->key_flags = __cpu_to_le32(arg->key_flags);
1975 cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
1976 cmd->key_len = __cpu_to_le32(arg->key_len);
1977 cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
1978 cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
1979
1980 if (arg->macaddr)
1981 memcpy(cmd->peer_macaddr.addr, arg->macaddr, ETH_ALEN);
1982 if (arg->key_data)
1983 memcpy(cmd->key_data, arg->key_data, arg->key_len);
1984
Michal Kaziore0c508a2013-07-05 16:15:17 +03001985 ath10k_dbg(ATH10K_DBG_WMI,
1986 "wmi vdev install key idx %d cipher %d len %d\n",
1987 arg->key_idx, arg->key_cipher, arg->key_len);
Bartosz Markowskice428702013-09-26 17:47:05 +02001988 return ath10k_wmi_cmd_send(ar, skb,
1989 ar->wmi.cmd->vdev_install_key_cmdid);
Kalle Valo5e3dd152013-06-12 20:52:10 +03001990}
1991
1992int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
1993 const u8 peer_addr[ETH_ALEN])
1994{
1995 struct wmi_peer_create_cmd *cmd;
1996 struct sk_buff *skb;
1997
1998 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
1999 if (!skb)
2000 return -ENOMEM;
2001
2002 cmd = (struct wmi_peer_create_cmd *)skb->data;
2003 cmd->vdev_id = __cpu_to_le32(vdev_id);
2004 memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
2005
2006 ath10k_dbg(ATH10K_DBG_WMI,
2007 "wmi peer create vdev_id %d peer_addr %pM\n",
2008 vdev_id, peer_addr);
Bartosz Markowskice428702013-09-26 17:47:05 +02002009 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002010}
2011
2012int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
2013 const u8 peer_addr[ETH_ALEN])
2014{
2015 struct wmi_peer_delete_cmd *cmd;
2016 struct sk_buff *skb;
2017
2018 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
2019 if (!skb)
2020 return -ENOMEM;
2021
2022 cmd = (struct wmi_peer_delete_cmd *)skb->data;
2023 cmd->vdev_id = __cpu_to_le32(vdev_id);
2024 memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
2025
2026 ath10k_dbg(ATH10K_DBG_WMI,
2027 "wmi peer delete vdev_id %d peer_addr %pM\n",
2028 vdev_id, peer_addr);
Bartosz Markowskice428702013-09-26 17:47:05 +02002029 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002030}
2031
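/* Ask the firmware to flush queued frames for a peer on the TIDs set in
 * tid_bitmap.
 */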
2032int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
2033 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
2034{
2035 struct wmi_peer_flush_tids_cmd *cmd;
2036 struct sk_buff *skb;
2037
2038 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
2039 if (!skb)
2040 return -ENOMEM;
2041
2042 cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
2043 cmd->vdev_id = __cpu_to_le32(vdev_id);
2044 cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
2045 memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
2046
2047 ath10k_dbg(ATH10K_DBG_WMI,
2048 "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
2049 vdev_id, peer_addr, tid_bitmap);
Bartosz Markowskice428702013-09-26 17:47:05 +02002050 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002051}
2052
2053int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
2054 const u8 *peer_addr, enum wmi_peer_param param_id,
2055 u32 param_value)
2056{
2057 struct wmi_peer_set_param_cmd *cmd;
2058 struct sk_buff *skb;
2059
2060 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
2061 if (!skb)
2062 return -ENOMEM;
2063
2064 cmd = (struct wmi_peer_set_param_cmd *)skb->data;
2065 cmd->vdev_id = __cpu_to_le32(vdev_id);
2066 cmd->param_id = __cpu_to_le32(param_id);
2067 cmd->param_value = __cpu_to_le32(param_value);
2068 memcpy(&cmd->peer_macaddr.addr, peer_addr, 6);
2069
2070 ath10k_dbg(ATH10K_DBG_WMI,
2071 "wmi vdev %d peer 0x%pM set param %d value %d\n",
2072 vdev_id, peer_addr, param_id, param_value);
2073
Bartosz Markowskice428702013-09-26 17:47:05 +02002074 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002075}
2076
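/* Enable or disable station power save on a vdev (enum wmi_sta_ps_mode). */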
2077int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
2078 enum wmi_sta_ps_mode psmode)
2079{
2080 struct wmi_sta_powersave_mode_cmd *cmd;
2081 struct sk_buff *skb;
2082
2083 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
2084 if (!skb)
2085 return -ENOMEM;
2086
2087 cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
2088 cmd->vdev_id = __cpu_to_le32(vdev_id);
2089 cmd->sta_ps_mode = __cpu_to_le32(psmode);
2090
2091 ath10k_dbg(ATH10K_DBG_WMI,
2092 "wmi set powersave id 0x%x mode %d\n",
2093 vdev_id, psmode);
2094
Bartosz Markowskice428702013-09-26 17:47:05 +02002095 return ath10k_wmi_cmd_send(ar, skb,
2096 ar->wmi.cmd->sta_powersave_mode_cmdid);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002097}
2098
2099int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
2100 enum wmi_sta_powersave_param param_id,
2101 u32 value)
2102{
2103 struct wmi_sta_powersave_param_cmd *cmd;
2104 struct sk_buff *skb;
2105
2106 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
2107 if (!skb)
2108 return -ENOMEM;
2109
2110 cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
2111 cmd->vdev_id = __cpu_to_le32(vdev_id);
2112 cmd->param_id = __cpu_to_le32(param_id);
2113 cmd->param_value = __cpu_to_le32(value);
2114
2115 ath10k_dbg(ATH10K_DBG_WMI,
2116 "wmi sta ps param vdev_id 0x%x param %d value %d\n",
2117 vdev_id, param_id, value);
Bartosz Markowskice428702013-09-26 17:47:05 +02002118 return ath10k_wmi_cmd_send(ar, skb,
2119 ar->wmi.cmd->sta_powersave_param_cmdid);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002120}
2121
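/* Set a per-peer AP power save parameter.  Unlike the vdev-wide parameter
 * commands above, a peer MAC address is mandatory here.
 */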
2122int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
2123 enum wmi_ap_ps_peer_param param_id, u32 value)
2124{
2125 struct wmi_ap_ps_peer_cmd *cmd;
2126 struct sk_buff *skb;
2127
2128 if (!mac)
2129 return -EINVAL;
2130
2131 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
2132 if (!skb)
2133 return -ENOMEM;
2134
2135 cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
2136 cmd->vdev_id = __cpu_to_le32(vdev_id);
2137 cmd->param_id = __cpu_to_le32(param_id);
2138 cmd->param_value = __cpu_to_le32(value);
2139 memcpy(&cmd->peer_macaddr, mac, ETH_ALEN);
2140
2141 ath10k_dbg(ATH10K_DBG_WMI,
2142 "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
2143 vdev_id, param_id, value, mac);
2144
Bartosz Markowskice428702013-09-26 17:47:05 +02002145 return ath10k_wmi_cmd_send(ar, skb,
2146 ar->wmi.cmd->ap_ps_peer_param_cmdid);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002147}
2148
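/* Push the host's channel list to the firmware.  The command carries a
 * variable-length array of wmi_channel entries, so the skb is sized as
 * sizeof(*cmd) plus one wmi_channel per channel in the arg.
 */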
2149int ath10k_wmi_scan_chan_list(struct ath10k *ar,
2150 const struct wmi_scan_chan_list_arg *arg)
2151{
2152 struct wmi_scan_chan_list_cmd *cmd;
2153 struct sk_buff *skb;
2154 struct wmi_channel_arg *ch;
2155 struct wmi_channel *ci;
2156 int len;
2157 int i;
2158
2159 len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel);
2160
2161 skb = ath10k_wmi_alloc_skb(len);
2162 if (!skb)
2163 		return -ENOMEM;
2164
2165 cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
2166 cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
2167
2168 for (i = 0; i < arg->n_channels; i++) {
2169 u32 flags = 0;
2170
2171 ch = &arg->channels[i];
2172 ci = &cmd->chan_info[i];
2173
2174 if (ch->passive)
2175 flags |= WMI_CHAN_FLAG_PASSIVE;
2176 if (ch->allow_ibss)
2177 flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
2178 if (ch->allow_ht)
2179 flags |= WMI_CHAN_FLAG_ALLOW_HT;
2180 if (ch->allow_vht)
2181 flags |= WMI_CHAN_FLAG_ALLOW_VHT;
2182 if (ch->ht40plus)
2183 flags |= WMI_CHAN_FLAG_HT40_PLUS;
2184
2185 ci->mhz = __cpu_to_le32(ch->freq);
2186 ci->band_center_freq1 = __cpu_to_le32(ch->freq);
2187 ci->band_center_freq2 = 0;
2188 ci->min_power = ch->min_power;
2189 ci->max_power = ch->max_power;
2190 ci->reg_power = ch->max_reg_power;
2191 		ci->antenna_max    = ch->max_antenna_gain;
2193
2194 /* mode & flags share storage */
2195 ci->mode = ch->mode;
2196 ci->flags |= __cpu_to_le32(flags);
2197 }
2198
Bartosz Markowskice428702013-09-26 17:47:05 +02002199 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002200}
2201
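/* Send the (re)association parameters for a peer: capabilities, listen
 * interval, HT/VHT caps and the legacy/HT/VHT rate sets.
 */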
2202int ath10k_wmi_peer_assoc(struct ath10k *ar,
2203 const struct wmi_peer_assoc_complete_arg *arg)
2204{
2205 struct wmi_peer_assoc_complete_cmd *cmd;
2206 struct sk_buff *skb;
2207
2208 if (arg->peer_mpdu_density > 16)
2209 return -EINVAL;
2210 if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
2211 return -EINVAL;
2212 if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
2213 return -EINVAL;
2214
2215 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
2216 if (!skb)
2217 return -ENOMEM;
2218
2219 cmd = (struct wmi_peer_assoc_complete_cmd *)skb->data;
2220 cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
2221 cmd->peer_new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
2222 cmd->peer_associd = __cpu_to_le32(arg->peer_aid);
2223 cmd->peer_flags = __cpu_to_le32(arg->peer_flags);
2224 cmd->peer_caps = __cpu_to_le32(arg->peer_caps);
2225 cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval);
2226 cmd->peer_ht_caps = __cpu_to_le32(arg->peer_ht_caps);
2227 cmd->peer_max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
2228 cmd->peer_mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
2229 cmd->peer_rate_caps = __cpu_to_le32(arg->peer_rate_caps);
2230 cmd->peer_nss = __cpu_to_le32(arg->peer_num_spatial_streams);
2231 cmd->peer_vht_caps = __cpu_to_le32(arg->peer_vht_caps);
2232 cmd->peer_phymode = __cpu_to_le32(arg->peer_phymode);
2233
2234 memcpy(cmd->peer_macaddr.addr, arg->addr, ETH_ALEN);
2235
2236 cmd->peer_legacy_rates.num_rates =
2237 __cpu_to_le32(arg->peer_legacy_rates.num_rates);
2238 memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates,
2239 arg->peer_legacy_rates.num_rates);
2240
2241 cmd->peer_ht_rates.num_rates =
2242 __cpu_to_le32(arg->peer_ht_rates.num_rates);
2243 memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates,
2244 arg->peer_ht_rates.num_rates);
2245
2246 cmd->peer_vht_rates.rx_max_rate =
2247 __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
2248 cmd->peer_vht_rates.rx_mcs_set =
2249 __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
2250 cmd->peer_vht_rates.tx_max_rate =
2251 __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
2252 cmd->peer_vht_rates.tx_mcs_set =
2253 __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
2254
Michal Kaziore0c508a2013-07-05 16:15:17 +03002255 ath10k_dbg(ATH10K_DBG_WMI,
2256 "wmi peer assoc vdev %d addr %pM\n",
2257 arg->vdev_id, arg->addr);
Bartosz Markowskice428702013-09-26 17:47:05 +02002258 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002259}
2260
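/* Queue a beacon frame for transmission on a vdev.  The beacon payload is
 * appended after the fixed header and the command is submitted with
 * ath10k_wmi_cmd_send_nowait(), which does not block waiting for the
 * command to complete.
 */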
Michal Kaziored543882013-09-13 14:16:56 +02002261int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
2262 const struct wmi_bcn_tx_arg *arg)
Kalle Valo5e3dd152013-06-12 20:52:10 +03002263{
2264 struct wmi_bcn_tx_cmd *cmd;
2265 struct sk_buff *skb;
2266
2267 skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->bcn_len);
2268 if (!skb)
2269 return -ENOMEM;
2270
2271 cmd = (struct wmi_bcn_tx_cmd *)skb->data;
2272 cmd->hdr.vdev_id = __cpu_to_le32(arg->vdev_id);
2273 cmd->hdr.tx_rate = __cpu_to_le32(arg->tx_rate);
2274 cmd->hdr.tx_power = __cpu_to_le32(arg->tx_power);
2275 cmd->hdr.bcn_len = __cpu_to_le32(arg->bcn_len);
2276 memcpy(cmd->bcn, arg->bcn, arg->bcn_len);
2277
Bartosz Markowskice428702013-09-26 17:47:05 +02002278 return ath10k_wmi_cmd_send_nowait(ar, skb, ar->wmi.cmd->bcn_tx_cmdid);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002279}
2280
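/* Convert one host wmi_wmm_params_arg (a single access category) into the
 * little-endian on-the-wire wmi_wmm_params layout; used below for the
 * BE/BK/VI/VO queues.
 */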
2281static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
2282 const struct wmi_wmm_params_arg *arg)
2283{
2284 params->cwmin = __cpu_to_le32(arg->cwmin);
2285 params->cwmax = __cpu_to_le32(arg->cwmax);
2286 params->aifs = __cpu_to_le32(arg->aifs);
2287 params->txop = __cpu_to_le32(arg->txop);
2288 params->acm = __cpu_to_le32(arg->acm);
2289 params->no_ack = __cpu_to_le32(arg->no_ack);
2290}
2291
2292int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
2293 const struct wmi_pdev_set_wmm_params_arg *arg)
2294{
2295 struct wmi_pdev_set_wmm_params *cmd;
2296 struct sk_buff *skb;
2297
2298 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
2299 if (!skb)
2300 return -ENOMEM;
2301
2302 cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
2303 ath10k_wmi_pdev_set_wmm_param(&cmd->ac_be, &arg->ac_be);
2304 ath10k_wmi_pdev_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
2305 ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
2306 ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
2307
2308 ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
Bartosz Markowskice428702013-09-26 17:47:05 +02002309 return ath10k_wmi_cmd_send(ar, skb,
2310 ar->wmi.cmd->pdev_set_wmm_params_cmdid);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002311}
2312
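/* Request a statistics report from the firmware for the given stats id.
 * The results are delivered asynchronously via a WMI stats event.
 */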
2313int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
2314{
2315 struct wmi_request_stats_cmd *cmd;
2316 struct sk_buff *skb;
2317
2318 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
2319 if (!skb)
2320 return -ENOMEM;
2321
2322 cmd = (struct wmi_request_stats_cmd *)skb->data;
2323 cmd->stats_id = __cpu_to_le32(stats_id);
2324
2325 ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
Bartosz Markowskice428702013-09-26 17:47:05 +02002326 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
Kalle Valo5e3dd152013-06-12 20:52:10 +03002327}
Michal Kazior9cfbce72013-07-16 09:54:36 +02002328
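/* Debug helper: ask the firmware to simulate a hang of the given type
 * after delay_ms milliseconds, e.g. to exercise the driver's recovery
 * paths.
 */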
2329int ath10k_wmi_force_fw_hang(struct ath10k *ar,
2330 enum wmi_force_fw_hang_type type, u32 delay_ms)
2331{
2332 struct wmi_force_fw_hang_cmd *cmd;
2333 struct sk_buff *skb;
2334
2335 skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
2336 if (!skb)
2337 return -ENOMEM;
2338
2339 cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
2340 cmd->type = __cpu_to_le32(type);
2341 cmd->delay_ms = __cpu_to_le32(delay_ms);
2342
2343 ath10k_dbg(ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
2344 type, delay_ms);
Bartosz Markowskice428702013-09-26 17:47:05 +02002345 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
Michal Kazior9cfbce72013-07-16 09:54:36 +02002346}