blob: c9a8bb1186f2615ca80b477e0e384170f593c597 [file] [log] [blame]
Michal Kaziord7579d12014-12-03 10:10:54 +02001/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#ifndef _WMI_OPS_H_
19#define _WMI_OPS_H_
20
21struct ath10k;
22struct sk_buff;
23
/* Per-ABI WMI backend vtable.
 *
 * ath10k supports several firmware WMI dialects (main, 10.x, TLV, ...).
 * Each dialect registers one instance of this structure; the static
 * inline ath10k_wmi_*() wrappers below dispatch through it.  An op left
 * NULL means "not supported by this firmware ABI" and the wrappers
 * return -EOPNOTSUPP (or a documented fallback) in that case.
 *
 * Conventions visible in this header:
 *  - rx/map_svc consume their input directly and return nothing.
 *  - pull_*() ops parse an ABI-specific event skb into the common
 *    *_ev_arg form; they return 0 or a negative errno.
 *  - gen_*() ops only *build* a command skb (or an ERR_PTR on failure);
 *    the wrapper is responsible for sending it via ath10k_wmi_cmd_send().
 */
struct wmi_ops {
	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);

	/* Event parsers (firmware -> host). */
	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_scan_ev_arg *arg);
	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_mgmt_rx_ev_arg *arg);
	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_ch_info_ev_arg *arg);
	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_vdev_start_ev_arg *arg);
	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_peer_kick_ev_arg *arg);
	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_swba_ev_arg *arg);
	int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_phyerr_hdr_arg *arg);
	/* Note: operates on a raw buffer within the event, not an skb. */
	int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
			   int left_len, struct wmi_phyerr_ev_arg *arg);
	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_svc_rdy_ev_arg *arg);
	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_rdy_ev_arg *arg);
	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
			     struct ath10k_fw_stats *stats);
	int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_roam_ev_arg *arg);
	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_wow_ev_arg *arg);
	int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_echo_ev_arg *arg);
	enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);

	/* Command builders (host -> firmware). */
	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
					   u16 rd5g, u16 ctl2g, u16 ctl5g,
					   enum wmi_dfs_region dfs_reg);
	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
					      u32 value);
	struct sk_buff *(*gen_init)(struct ath10k *ar);
	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
					  const struct wmi_start_scan_arg *arg);
	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
					 const struct wmi_stop_scan_arg *arg);
	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
					   enum wmi_vdev_type type,
					   enum wmi_vdev_subtype subtype,
					   const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
					  const struct wmi_vdev_start_request_arg *arg,
					  bool restart);
	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
				       const u8 *bssid);
	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
					      u32 param_id, u32 param_value);
	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
						const struct wmi_vdev_install_key_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
						  const struct wmi_vdev_spectral_conf_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
						    u32 trigger, u32 enable);
	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
					     const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN],
					   enum wmi_peer_type peer_type);
	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
					  const u8 peer_addr[ETH_ALEN],
					  u32 tid_bitmap);
	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
					      const u8 *peer_addr,
					      enum wmi_peer_param param_id,
					      u32 param_value);
	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
					  const struct wmi_peer_assoc_complete_arg *arg);
	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_ps_mode psmode);
	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_powersave_param param_id,
					  u32 value);
	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
					 const u8 *mac,
					 enum wmi_ap_ps_peer_param param_id,
					 u32 value);
	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
					      const struct wmi_scan_chan_list_arg *arg);
	struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
					  const void *bcn, size_t bcn_len,
					  u32 bcn_paddr, bool dtim_zero,
					  bool deliver_cab);
	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
					    const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
					     enum wmi_force_fw_hang_type type,
					     u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
					  u32 log_level);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
						   u32 period, u32 duration,
						   u32 next_offset,
						   u32 enabled);
	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
						const u8 *mac);
	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 buf_size);
	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
					      const u8 *mac, u32 tid,
					      u32 status);
	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 initiator,
					  u32 reason);
	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
					u32 tim_ie_offset, struct sk_buff *bcn,
					u32 prb_caps, u32 prb_erp,
					void *prb_ies, size_t prb_ies_len);
	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
					struct sk_buff *bcn);
	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
					     const u8 *p2p_ie);
	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
					      const u8 peer_addr[ETH_ALEN],
					      const struct wmi_sta_uapsd_auto_trig_arg *args,
					      u32 num_ac);
	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
					     const struct wmi_sta_keepalive_arg *arg);
	struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
						    enum wmi_wow_wakeup_event event,
						    u32 enable);
	struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id,
					       const u8 *pattern,
					       const u8 *mask,
					       int pattern_len,
					       int pattern_offset);
	struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id);
	struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
						    u32 vdev_id,
						    enum wmi_tdls_state state);
	struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
						const struct wmi_tdls_peer_update_cmd_arg *arg,
						const struct wmi_tdls_peer_capab_arg *cap,
						const struct wmi_channel_arg *chan);
	struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
	struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
						   u32 param);
	/* Formats firmware statistics into a caller-supplied text buffer. */
	void (*fw_stats_fill)(struct ath10k *ar,
			      struct ath10k_fw_stats *fw_stats,
			      char *buf);
	struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
							u8 enable,
							u32 detect_level,
							u32 detect_margin);
	struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
					       enum wmi_host_platform_type type,
					       u32 fw_feature_bitmap);
	/* Maps a generic vdev subtype to the ABI-specific value. */
	int (*get_vdev_subtype)(struct ath10k *ar,
				enum wmi_vdev_subtype subtype);
	struct sk_buff *(*gen_pdev_bss_chan_info_req)
					(struct ath10k *ar,
					 enum wmi_bss_survey_req_type type);
	struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
};
201
202int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
203
/* Dispatch a received WMI event skb to the ABI-specific rx handler.
 * A missing rx op is a driver bug, hence the WARN_ON_ONCE before the
 * -EOPNOTSUPP return. */
static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
	if (WARN_ON_ONCE(!ar->wmi.ops->rx))
		return -EOPNOTSUPP;

	ar->wmi.ops->rx(ar, skb);
	return 0;
}

/* Translate the firmware's service-ready bitmap (@in, __le32 words of
 * total byte length @len) into the host's service bitmap (@out).
 * Returns 0, or -EOPNOTSUPP if the ABI provides no mapper. */
static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
		   size_t len)
{
	if (!ar->wmi.ops->map_svc)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc(in, out, len);
	return 0;
}
224
/* The ath10k_wmi_pull_*() helpers below parse an ABI-specific WMI event
 * into its common *_ev_arg (or stats) representation.  Each one returns
 * 0 on success, a negative errno from the parser, or -EOPNOTSUPP when
 * the running firmware ABI does not implement the event. */

static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_scan_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_scan)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_scan(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_mgmt_rx_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_rx)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_ch_info_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_ch_info)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_vdev_start_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_vdev_start)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_peer_kick_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_peer_kick)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_swba_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_swba)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_swba(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_phyerr_hdr_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr_hdr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
}

/* Unlike its siblings this parses one phyerr record from a raw buffer
 * (@phyerr_buf with @left_len bytes remaining), not a whole skb. */
static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
		       int left_len, struct wmi_phyerr_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
}

static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_svc_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
		    struct wmi_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
			 struct ath10k_fw_stats *stats)
{
	if (!ar->wmi.ops->pull_fw_stats)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}

static inline int
ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_roam_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_roam_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_wow_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_wow_event)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_echo_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_echo_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
}
364
/* Query the transmit-beamforming configuration scheme of the running
 * firmware ABI; falls back to WMI_TXBF_CONF_UNSUPPORTED (not an errno)
 * when the op is absent. */
static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
	if (!ar->wmi.ops->get_txbf_conf_scheme)
		return WMI_TXBF_CONF_UNSUPPORTED;

	return ar->wmi.ops->get_txbf_conf_scheme(ar);
}
373
/* Transmit a management frame (@msdu) over WMI and immediately report a
 * (fake) ACK to mac80211 — see the FIXME below.  Returns 0 or a negative
 * errno from command generation/sending. */
static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;

	/* FIXME There's no ACK event for Management Tx. This probably
	 * shouldn't be called here either. */
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	return 0;
}
399
/* Build and send the regulatory-domain command (2GHz/5GHz regdomain and
 * conformance-test-limit codes plus DFS region). */
static inline int
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
			      u16 ctl2g, u16 ctl5g,
			      enum wmi_dfs_region dfs_reg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_rd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
					   dfs_reg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
}

/* Ask the firmware to suspend the pdev with option @suspend_opt. */
static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_suspend)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}

/* Resume a previously suspended pdev. */
static inline int
ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_resume)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_resume(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}

/* Set a single pdev-level firmware parameter @id to @value. */
static inline int
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}

/* Send the WMI init command that finalizes firmware configuration. */
static inline int
ath10k_wmi_cmd_init(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_init)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_init(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}
478
/* Start a hardware scan described by @arg. */
static inline int
ath10k_wmi_start_scan(struct ath10k *ar,
		      const struct wmi_start_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_start_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_start_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

/* Abort/stop an ongoing scan as described by @arg. */
static inline int
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_stop_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}
509
/* Create a virtual device (interface) of the given type/subtype with the
 * given MAC address. */
static inline int
ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
		       enum wmi_vdev_type type,
		       enum wmi_vdev_subtype subtype,
		       const u8 macaddr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}

/* Delete vdev @vdev_id. */
static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}

/* Start a vdev (restart=false variant of gen_vdev_start). */
static inline int
ath10k_wmi_vdev_start(struct ath10k *ar,
		      const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_start_request_cmdid);
}

/* Restart a vdev: same builder as vdev_start but with restart=true and
 * sent under the restart cmdid. */
static inline int
ath10k_wmi_vdev_restart(struct ath10k *ar,
			const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_restart_request_cmdid);
}

/* Stop vdev @vdev_id. */
static inline int
ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_stop)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}

/* Bring a vdev up with association id @aid and BSSID @bssid. */
static inline int
ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_up)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}

/* Bring vdev @vdev_id down. */
static inline int
ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_down)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}

/* Set a per-vdev firmware parameter. */
static inline int
ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
			  u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}

/* Install an encryption key described by @arg on a vdev. */
static inline int
ath10k_wmi_vdev_install_key(struct ath10k *ar,
			    const struct wmi_vdev_install_key_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_install_key)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_install_key_cmdid);
}
655
656static inline int
657ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
658 const struct wmi_vdev_spectral_conf_arg *arg)
659{
660 struct sk_buff *skb;
661 u32 cmd_id;
662
663 skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
664 if (IS_ERR(skb))
665 return PTR_ERR(skb);
666
667 cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
668 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
669}
670
671static inline int
672ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
673 u32 enable)
674{
675 struct sk_buff *skb;
676 u32 cmd_id;
677
678 skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
679 enable);
680 if (IS_ERR(skb))
681 return PTR_ERR(skb);
682
683 cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
684 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
685}
686
/* Configure U-APSD auto-trigger parameters (@args, @num_ac entries) for
 * the peer @peer_addr on vdev @vdev_id. */
static inline int
ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
			  const u8 peer_addr[ETH_ALEN],
			  const struct wmi_sta_uapsd_auto_trig_arg *args,
			  u32 num_ac)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_sta_uapsd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
					      num_ac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
707
708static inline int
Michal Kazior6d492fe2015-01-28 09:57:22 +0200709ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
710 const struct wmi_wmm_params_all_arg *arg)
711{
712 struct sk_buff *skb;
713 u32 cmd_id;
714
715 skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
716 if (IS_ERR(skb))
717 return PTR_ERR(skb);
718
719 cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
720 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
721}
722
/* Create a firmware peer entry of @peer_type for @peer_addr on @vdev_id. */
static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN],
		       enum wmi_peer_type peer_type)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}

/* Remove a firmware peer entry. */
static inline int
ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}

/* Flush queued frames for the TIDs in @tid_bitmap of the given peer. */
static inline int
ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
		      const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_flush)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}

/* Set a per-peer firmware parameter. */
static inline int
ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
			  enum wmi_peer_param param_id, u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}

/* Select the station powersave mode for a vdev. */
static inline int
ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
		      enum wmi_sta_ps_mode psmode)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_psmode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_mode_cmdid);
}

/* Set a station powersave parameter on a vdev. */
static inline int
ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
			    enum wmi_sta_powersave_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_sta_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_param_cmdid);
}

/* Set an AP-side powersave parameter for the peer @mac. */
static inline int
ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			   enum wmi_ap_ps_peer_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_ap_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ap_ps_peer_param_cmdid);
}

/* Program the firmware's scan channel list from @arg. */
static inline int
ath10k_wmi_scan_chan_list(struct ath10k *ar,
			  const struct wmi_scan_chan_list_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_scan_chan_list)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}

/* Push the completed peer association parameters to firmware. */
static inline int
ath10k_wmi_peer_assoc(struct ath10k *ar,
		      const struct wmi_peer_assoc_complete_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_assoc)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}
871
/* Hand a DMA-mapped beacon (@bcn_paddr, @bcn_len bytes at @bcn) to the
 * firmware by reference.  Uses the no-wait send path, so on failure the
 * command skb must be freed here by the caller side of the API. */
static inline int
ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
				  const void *bcn, size_t bcn_len,
				  u32 bcn_paddr, bool dtim_zero,
				  bool deliver_cab)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_beacon_dma)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
					  dtim_zero, deliver_cab);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
					 ar->wmi.cmd->pdev_send_bcn_cmdid);
	if (ret) {
		/* nowait send does not consume the skb on error */
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}
898
/* Set pdev-wide WMM (QoS) parameters from @arg. */
static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
			       const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_wmm)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}

/* Request firmware statistics selected by @stats_mask; results arrive
 * asynchronously and are parsed via pull_fw_stats. */
static inline int
ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_request_stats)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}

/* Deliberately crash/hang the firmware after @delay_ms (debug/testing). */
static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
			 enum wmi_force_fw_hang_type type, u32 delay_ms)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_force_fw_hang)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}

/* Configure firmware debug logging: @module_enable bitmap + @log_level. */
static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_dbglog_cfg)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}
961
/* Enable firmware packet logging with the event @filter bitmap. */
static inline int
ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}

/* Disable firmware packet logging. */
static inline int
ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_disable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_disable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}

/* Configure quiet-time scheduling: silence @duration within each @period
 * starting at @next_offset; @enabled turns the mechanism on/off. */
static inline int
ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
			       u32 next_offset, u32 enabled)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
						   next_offset, enabled);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
}
1010
Rajkumar Manoharana57a6a22014-12-17 12:22:17 +02001011static inline int
1012ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
1013{
1014 struct sk_buff *skb;
1015
1016 if (!ar->wmi.ops->gen_pdev_get_temperature)
1017 return -EOPNOTSUPP;
1018
1019 skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
1020 if (IS_ERR(skb))
1021 return PTR_ERR(skb);
1022
1023 return ath10k_wmi_cmd_send(ar, skb,
1024 ar->wmi.cmd->pdev_get_temperature_cmdid);
1025}
1026
Rajkumar Manoharandc8ab272015-01-12 14:07:25 +02001027static inline int
1028ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
1029{
1030 struct sk_buff *skb;
1031
1032 if (!ar->wmi.ops->gen_addba_clear_resp)
1033 return -EOPNOTSUPP;
1034
1035 skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
1036 if (IS_ERR(skb))
1037 return PTR_ERR(skb);
1038
1039 return ath10k_wmi_cmd_send(ar, skb,
1040 ar->wmi.cmd->addba_clear_resp_cmdid);
1041}
1042
Rajkumar Manoharan65c08932015-01-12 14:07:26 +02001043static inline int
1044ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1045 u32 tid, u32 buf_size)
1046{
1047 struct sk_buff *skb;
1048
1049 if (!ar->wmi.ops->gen_addba_send)
1050 return -EOPNOTSUPP;
1051
1052 skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
1053 if (IS_ERR(skb))
1054 return PTR_ERR(skb);
1055
1056 return ath10k_wmi_cmd_send(ar, skb,
1057 ar->wmi.cmd->addba_send_cmdid);
1058}
1059
Rajkumar Manoharan11597412015-01-12 14:07:26 +02001060static inline int
1061ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1062 u32 tid, u32 status)
1063{
1064 struct sk_buff *skb;
1065
1066 if (!ar->wmi.ops->gen_addba_set_resp)
1067 return -EOPNOTSUPP;
1068
1069 skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
1070 if (IS_ERR(skb))
1071 return PTR_ERR(skb);
1072
1073 return ath10k_wmi_cmd_send(ar, skb,
1074 ar->wmi.cmd->addba_set_resp_cmdid);
1075}
1076
Rajkumar Manoharan50abef82015-01-12 14:07:26 +02001077static inline int
1078ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1079 u32 tid, u32 initiator, u32 reason)
1080{
1081 struct sk_buff *skb;
1082
1083 if (!ar->wmi.ops->gen_delba_send)
1084 return -EOPNOTSUPP;
1085
1086 skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
1087 reason);
1088 if (IS_ERR(skb))
1089 return PTR_ERR(skb);
1090
1091 return ath10k_wmi_cmd_send(ar, skb,
1092 ar->wmi.cmd->delba_send_cmdid);
1093}
1094
Michal Kaziorbe9ce9d2015-01-13 16:30:11 +02001095static inline int
1096ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
1097 struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
1098 void *prb_ies, size_t prb_ies_len)
1099{
1100 struct sk_buff *skb;
1101
1102 if (!ar->wmi.ops->gen_bcn_tmpl)
1103 return -EOPNOTSUPP;
1104
1105 skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
1106 prb_caps, prb_erp, prb_ies,
1107 prb_ies_len);
1108 if (IS_ERR(skb))
1109 return PTR_ERR(skb);
1110
1111 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
1112}
1113
Michal Kazior4c4955f2015-01-13 16:30:11 +02001114static inline int
1115ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
1116{
1117 struct sk_buff *skb;
1118
1119 if (!ar->wmi.ops->gen_prb_tmpl)
1120 return -EOPNOTSUPP;
1121
1122 skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
1123 if (IS_ERR(skb))
1124 return PTR_ERR(skb);
1125
1126 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
1127}
1128
Michal Kazior369242b4e2015-01-13 16:30:11 +02001129static inline int
1130ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
1131{
1132 struct sk_buff *skb;
1133
1134 if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
1135 return -EOPNOTSUPP;
1136
1137 skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
1138 if (IS_ERR(skb))
1139 return PTR_ERR(skb);
1140
1141 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
1142}
1143
Janusz Dziedzic6e8b1882015-01-28 09:57:39 +02001144static inline int
1145ath10k_wmi_sta_keepalive(struct ath10k *ar,
1146 const struct wmi_sta_keepalive_arg *arg)
1147{
1148 struct sk_buff *skb;
1149 u32 cmd_id;
1150
1151 if (!ar->wmi.ops->gen_sta_keepalive)
1152 return -EOPNOTSUPP;
1153
1154 skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
1155 if (IS_ERR(skb))
1156 return PTR_ERR(skb);
1157
1158 cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
1159 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1160}
1161
Janusz Dziedzicf5431e82015-03-23 17:32:53 +02001162static inline int
1163ath10k_wmi_wow_enable(struct ath10k *ar)
1164{
1165 struct sk_buff *skb;
1166 u32 cmd_id;
1167
1168 if (!ar->wmi.ops->gen_wow_enable)
1169 return -EOPNOTSUPP;
1170
1171 skb = ar->wmi.ops->gen_wow_enable(ar);
1172 if (IS_ERR(skb))
1173 return PTR_ERR(skb);
1174
1175 cmd_id = ar->wmi.cmd->wow_enable_cmdid;
1176 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1177}
1178
1179static inline int
1180ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
1181 enum wmi_wow_wakeup_event event,
1182 u32 enable)
1183{
1184 struct sk_buff *skb;
1185 u32 cmd_id;
1186
1187 if (!ar->wmi.ops->gen_wow_add_wakeup_event)
1188 return -EOPNOTSUPP;
1189
1190 skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
1191 if (IS_ERR(skb))
1192 return PTR_ERR(skb);
1193
1194 cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
1195 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1196}
1197
1198static inline int
1199ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
1200{
1201 struct sk_buff *skb;
1202 u32 cmd_id;
1203
1204 if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
1205 return -EOPNOTSUPP;
1206
1207 skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
1208 if (IS_ERR(skb))
1209 return PTR_ERR(skb);
1210
1211 cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
1212 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1213}
1214
Janusz Dziedzicd4976102015-03-23 17:32:54 +02001215static inline int
1216ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
1217 const u8 *pattern, const u8 *mask,
1218 int pattern_len, int pattern_offset)
1219{
1220 struct sk_buff *skb;
1221 u32 cmd_id;
1222
1223 if (!ar->wmi.ops->gen_wow_add_pattern)
1224 return -EOPNOTSUPP;
1225
1226 skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
1227 pattern, mask, pattern_len,
1228 pattern_offset);
1229 if (IS_ERR(skb))
1230 return PTR_ERR(skb);
1231
1232 cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
1233 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1234}
1235
1236static inline int
1237ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
1238{
1239 struct sk_buff *skb;
1240 u32 cmd_id;
1241
1242 if (!ar->wmi.ops->gen_wow_del_pattern)
1243 return -EOPNOTSUPP;
1244
1245 skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
1246 if (IS_ERR(skb))
1247 return PTR_ERR(skb);
1248
1249 cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
1250 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1251}
Marek Puzyniakad45c882015-03-30 09:51:53 +03001252
1253static inline int
1254ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
1255 enum wmi_tdls_state state)
1256{
1257 struct sk_buff *skb;
1258
1259 if (!ar->wmi.ops->gen_update_fw_tdls_state)
1260 return -EOPNOTSUPP;
1261
1262 skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
1263 if (IS_ERR(skb))
1264 return PTR_ERR(skb);
1265
1266 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
1267}
1268
1269static inline int
1270ath10k_wmi_tdls_peer_update(struct ath10k *ar,
1271 const struct wmi_tdls_peer_update_cmd_arg *arg,
1272 const struct wmi_tdls_peer_capab_arg *cap,
1273 const struct wmi_channel_arg *chan)
1274{
1275 struct sk_buff *skb;
1276
1277 if (!ar->wmi.ops->gen_tdls_peer_update)
1278 return -EOPNOTSUPP;
1279
1280 skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
1281 if (IS_ERR(skb))
1282 return PTR_ERR(skb);
1283
1284 return ath10k_wmi_cmd_send(ar, skb,
1285 ar->wmi.cmd->tdls_peer_update_cmdid);
1286}
1287
Michal Kazior5b272e32015-03-31 10:26:22 +00001288static inline int
1289ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
1290{
1291 struct sk_buff *skb;
1292
1293 if (!ar->wmi.ops->gen_adaptive_qcs)
1294 return -EOPNOTSUPP;
1295
1296 skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
1297 if (IS_ERR(skb))
1298 return PTR_ERR(skb);
1299
1300 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
1301}
1302
Maharaja Kennadyrajan29542662015-10-05 17:56:38 +03001303static inline int
1304ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
1305{
1306 struct sk_buff *skb;
1307
1308 if (!ar->wmi.ops->gen_pdev_get_tpc_config)
1309 return -EOPNOTSUPP;
1310
1311 skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
1312
1313 if (IS_ERR(skb))
1314 return PTR_ERR(skb);
1315
1316 return ath10k_wmi_cmd_send(ar, skb,
1317 ar->wmi.cmd->pdev_get_tpc_config_cmdid);
1318}
1319
Manikanta Pubbisettybc6f9ae2015-10-16 15:54:52 +03001320static inline int
1321ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
1322 char *buf)
1323{
1324 if (!ar->wmi.ops->fw_stats_fill)
1325 return -EOPNOTSUPP;
1326
1327 ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
1328 return 0;
1329}
Maharaja62f77f02015-10-21 11:49:18 +03001330
1331static inline int
1332ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
1333 u32 detect_level, u32 detect_margin)
1334{
1335 struct sk_buff *skb;
1336
1337 if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
1338 return -EOPNOTSUPP;
1339
1340 skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
1341 detect_level,
1342 detect_margin);
1343
1344 if (IS_ERR(skb))
1345 return PTR_ERR(skb);
1346
1347 return ath10k_wmi_cmd_send(ar, skb,
1348 ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
1349}
1350
Peter Oh6e4de1a2016-01-28 13:54:10 -08001351static inline int
Raja Mani47771902016-03-16 18:13:33 +05301352ath10k_wmi_ext_resource_config(struct ath10k *ar,
1353 enum wmi_host_platform_type type,
1354 u32 fw_feature_bitmap)
1355{
1356 struct sk_buff *skb;
1357
1358 if (!ar->wmi.ops->ext_resource_config)
1359 return -EOPNOTSUPP;
1360
1361 skb = ar->wmi.ops->ext_resource_config(ar, type,
1362 fw_feature_bitmap);
1363
1364 if (IS_ERR(skb))
1365 return PTR_ERR(skb);
1366
1367 return ath10k_wmi_cmd_send(ar, skb,
1368 ar->wmi.cmd->ext_resource_cfg_cmdid);
1369}
1370
1371static inline int
Peter Oh6e4de1a2016-01-28 13:54:10 -08001372ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
1373{
1374 if (!ar->wmi.ops->get_vdev_subtype)
1375 return -EOPNOTSUPP;
1376
1377 return ar->wmi.ops->get_vdev_subtype(ar, subtype);
1378}
1379
Rajkumar Manoharan8a0b4592016-04-27 16:23:20 +05301380static inline int
1381ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
1382 enum wmi_bss_survey_req_type type)
1383{
1384 struct ath10k_wmi *wmi = &ar->wmi;
1385 struct sk_buff *skb;
1386
1387 if (!wmi->ops->gen_pdev_bss_chan_info_req)
1388 return -EOPNOTSUPP;
1389
1390 skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
1391 if (IS_ERR(skb))
1392 return PTR_ERR(skb);
1393
1394 return ath10k_wmi_cmd_send(ar, skb,
1395 wmi->cmd->pdev_bss_chan_info_request_cmdid);
1396}
1397
Michal Kaziore25854f2016-08-19 13:37:41 +03001398static inline int
1399ath10k_wmi_echo(struct ath10k *ar, u32 value)
1400{
1401 struct ath10k_wmi *wmi = &ar->wmi;
1402 struct sk_buff *skb;
1403
1404 if (!wmi->ops->gen_echo)
1405 return -EOPNOTSUPP;
1406
1407 skb = wmi->ops->gen_echo(ar, value);
1408 if (IS_ERR(skb))
1409 return PTR_ERR(skb);
1410
1411 return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
1412}
1413
Michal Kaziord7579d12014-12-03 10:10:54 +02001414#endif