/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _WMI_OPS_H_
#define _WMI_OPS_H_

struct ath10k;
struct sk_buff;

/*
 * Per-firmware-ABI WMI backend operations.
 *
 * Each supported WMI ABI (main, 10.x, TLV, ...) provides one instance of
 * this vtable.  "pull_*" ops parse a received event skb into a generic
 * *_ev_arg structure; "gen_*" ops build a command skb that the caller then
 * submits with ath10k_wmi_cmd_send().  Any op pointer may be NULL when the
 * given firmware ABI does not implement the corresponding command/event;
 * the inline wrappers below turn that into -EOPNOTSUPP.
 */
struct wmi_ops {
	/* Dispatch a received WMI event skb. */
	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
	/* Translate the ABI-specific service bitmap into the generic one. */
	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);

	/* Event parsers: decode skb payload into generic arg structures. */
	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_scan_ev_arg *arg);
	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_mgmt_rx_ev_arg *arg);
	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_ch_info_ev_arg *arg);
	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_vdev_start_ev_arg *arg);
	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_peer_kick_ev_arg *arg);
	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_swba_ev_arg *arg);
	int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_phyerr_hdr_arg *arg);
	/* Operates on a raw buffer (not an skb): one phyerr record out of a
	 * batch delivered in a single event.
	 */
	int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
			   int left_len, struct wmi_phyerr_ev_arg *arg);
	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_svc_rdy_ev_arg *arg);
	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_rdy_ev_arg *arg);
	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
			     struct ath10k_fw_stats *stats);
	int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_roam_ev_arg *arg);
	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_wow_ev_arg *arg);
	enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);

	/* Command generators: build (but do not send) a command skb. */
	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
					   u16 rd5g, u16 ctl2g, u16 ctl5g,
					   enum wmi_dfs_region dfs_reg);
	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
					      u32 value);
	struct sk_buff *(*gen_init)(struct ath10k *ar);
	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
					  const struct wmi_start_scan_arg *arg);
	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
					 const struct wmi_stop_scan_arg *arg);
	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
					   enum wmi_vdev_type type,
					   enum wmi_vdev_subtype subtype,
					   const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
					  const struct wmi_vdev_start_request_arg *arg,
					  bool restart);
	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
				       const u8 *bssid);
	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
					      u32 param_id, u32 param_value);
	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
						const struct wmi_vdev_install_key_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
						  const struct wmi_vdev_spectral_conf_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
						    u32 trigger, u32 enable);
	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
					     const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN],
					   enum wmi_peer_type peer_type);
	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
					  const u8 peer_addr[ETH_ALEN],
					  u32 tid_bitmap);
	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
					      const u8 *peer_addr,
					      enum wmi_peer_param param_id,
					      u32 param_value);
	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
					  const struct wmi_peer_assoc_complete_arg *arg);
	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_ps_mode psmode);
	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_powersave_param param_id,
					  u32 value);
	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
					 const u8 *mac,
					 enum wmi_ap_ps_peer_param param_id,
					 u32 value);
	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
					      const struct wmi_scan_chan_list_arg *arg);
	struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
					  const void *bcn, size_t bcn_len,
					  u32 bcn_paddr, bool dtim_zero,
					  bool deliver_cab);
	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
					    const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
					     enum wmi_force_fw_hang_type type,
					     u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
					  u32 log_level);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
						   u32 period, u32 duration,
						   u32 next_offset,
						   u32 enabled);
	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
						const u8 *mac);
	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 buf_size);
	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
					      const u8 *mac, u32 tid,
					      u32 status);
	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 initiator,
					  u32 reason);
	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
					u32 tim_ie_offset, struct sk_buff *bcn,
					u32 prb_caps, u32 prb_erp,
					void *prb_ies, size_t prb_ies_len);
	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
					struct sk_buff *bcn);
	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
					     const u8 *p2p_ie);
	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
					      const u8 peer_addr[ETH_ALEN],
					      const struct wmi_sta_uapsd_auto_trig_arg *args,
					      u32 num_ac);
	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
					     const struct wmi_sta_keepalive_arg *arg);
	struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
						    enum wmi_wow_wakeup_event event,
						    u32 enable);
	struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id,
					       const u8 *pattern,
					       const u8 *mask,
					       int pattern_len,
					       int pattern_offset);
	struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id);
	struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
						    u32 vdev_id,
						    enum wmi_tdls_state state);
	struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
						const struct wmi_tdls_peer_update_cmd_arg *arg,
						const struct wmi_tdls_peer_capab_arg *cap,
						const struct wmi_channel_arg *chan);
	struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
	struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
						   u32 param);
	/* Render firmware statistics into a text buffer (debugfs). */
	void (*fw_stats_fill)(struct ath10k *ar,
			      struct ath10k_fw_stats *fw_stats,
			      char *buf);
};

int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);

189static inline int
190ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
191{
192 if (WARN_ON_ONCE(!ar->wmi.ops->rx))
193 return -EOPNOTSUPP;
194
195 ar->wmi.ops->rx(ar, skb);
196 return 0;
197}
198
199static inline int
200ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
201 size_t len)
202{
203 if (!ar->wmi.ops->map_svc)
204 return -EOPNOTSUPP;
205
206 ar->wmi.ops->map_svc(in, out, len);
207 return 0;
208}
209
210static inline int
211ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
212 struct wmi_scan_ev_arg *arg)
213{
214 if (!ar->wmi.ops->pull_scan)
215 return -EOPNOTSUPP;
216
217 return ar->wmi.ops->pull_scan(ar, skb, arg);
218}
219
220static inline int
221ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
222 struct wmi_mgmt_rx_ev_arg *arg)
223{
224 if (!ar->wmi.ops->pull_mgmt_rx)
225 return -EOPNOTSUPP;
226
227 return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
228}
229
230static inline int
231ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
232 struct wmi_ch_info_ev_arg *arg)
233{
234 if (!ar->wmi.ops->pull_ch_info)
235 return -EOPNOTSUPP;
236
237 return ar->wmi.ops->pull_ch_info(ar, skb, arg);
238}
239
240static inline int
241ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
242 struct wmi_vdev_start_ev_arg *arg)
243{
244 if (!ar->wmi.ops->pull_vdev_start)
245 return -EOPNOTSUPP;
246
247 return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
248}
249
250static inline int
251ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
252 struct wmi_peer_kick_ev_arg *arg)
253{
254 if (!ar->wmi.ops->pull_peer_kick)
255 return -EOPNOTSUPP;
256
257 return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
258}
259
260static inline int
261ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
262 struct wmi_swba_ev_arg *arg)
263{
264 if (!ar->wmi.ops->pull_swba)
265 return -EOPNOTSUPP;
266
267 return ar->wmi.ops->pull_swba(ar, skb, arg);
268}
269
270static inline int
Raja Mani991adf72015-08-14 11:13:29 +0300271ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
272 struct wmi_phyerr_hdr_arg *arg)
273{
274 if (!ar->wmi.ops->pull_phyerr_hdr)
275 return -EOPNOTSUPP;
276
277 return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
278}
279
280static inline int
281ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
282 int left_len, struct wmi_phyerr_ev_arg *arg)
Michal Kaziord7579d12014-12-03 10:10:54 +0200283{
284 if (!ar->wmi.ops->pull_phyerr)
285 return -EOPNOTSUPP;
286
Raja Mani991adf72015-08-14 11:13:29 +0300287 return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
Michal Kaziord7579d12014-12-03 10:10:54 +0200288}
289
290static inline int
291ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
292 struct wmi_svc_rdy_ev_arg *arg)
293{
294 if (!ar->wmi.ops->pull_svc_rdy)
295 return -EOPNOTSUPP;
296
297 return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
298}
299
300static inline int
301ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
302 struct wmi_rdy_ev_arg *arg)
303{
304 if (!ar->wmi.ops->pull_rdy)
305 return -EOPNOTSUPP;
306
307 return ar->wmi.ops->pull_rdy(ar, skb, arg);
308}
309
310static inline int
311ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
312 struct ath10k_fw_stats *stats)
313{
314 if (!ar->wmi.ops->pull_fw_stats)
315 return -EOPNOTSUPP;
316
317 return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
318}
319
320static inline int
Michal Kaziorc1a46542015-03-10 16:21:54 +0200321ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
322 struct wmi_roam_ev_arg *arg)
323{
324 if (!ar->wmi.ops->pull_roam_ev)
325 return -EOPNOTSUPP;
326
327 return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
328}
329
330static inline int
Janusz Dziedzicf5431e82015-03-23 17:32:53 +0200331ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
332 struct wmi_wow_ev_arg *arg)
333{
334 if (!ar->wmi.ops->pull_wow_event)
335 return -EOPNOTSUPP;
336
337 return ar->wmi.ops->pull_wow_event(ar, skb, arg);
338}
339
Vivek Natarajan08e75ea2015-08-04 10:45:11 +0530340static inline enum wmi_txbf_conf
341ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
342{
343 if (!ar->wmi.ops->get_txbf_conf_scheme)
344 return WMI_TXBF_CONF_UNSUPPORTED;
345
346 return ar->wmi.ops->get_txbf_conf_scheme(ar);
347}
348
Janusz Dziedzicf5431e82015-03-23 17:32:53 +0200349static inline int
Michal Kaziord7579d12014-12-03 10:10:54 +0200350ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
351{
352 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
353 struct sk_buff *skb;
354 int ret;
355
356 if (!ar->wmi.ops->gen_mgmt_tx)
357 return -EOPNOTSUPP;
358
359 skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
360 if (IS_ERR(skb))
361 return PTR_ERR(skb);
362
363 ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
364 if (ret)
365 return ret;
366
367 /* FIXME There's no ACK event for Management Tx. This probably
368 * shouldn't be called here either. */
369 info->flags |= IEEE80211_TX_STAT_ACK;
370 ieee80211_tx_status_irqsafe(ar->hw, msdu);
371
372 return 0;
373}
374
375static inline int
376ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
377 u16 ctl2g, u16 ctl5g,
378 enum wmi_dfs_region dfs_reg)
379{
380 struct sk_buff *skb;
381
382 if (!ar->wmi.ops->gen_pdev_set_rd)
383 return -EOPNOTSUPP;
384
385 skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
386 dfs_reg);
387 if (IS_ERR(skb))
388 return PTR_ERR(skb);
389
390 return ath10k_wmi_cmd_send(ar, skb,
391 ar->wmi.cmd->pdev_set_regdomain_cmdid);
392}
393
394static inline int
395ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
396{
397 struct sk_buff *skb;
398
399 if (!ar->wmi.ops->gen_pdev_suspend)
400 return -EOPNOTSUPP;
401
402 skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
403 if (IS_ERR(skb))
404 return PTR_ERR(skb);
405
406 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
407}
408
409static inline int
410ath10k_wmi_pdev_resume_target(struct ath10k *ar)
411{
412 struct sk_buff *skb;
413
414 if (!ar->wmi.ops->gen_pdev_resume)
415 return -EOPNOTSUPP;
416
417 skb = ar->wmi.ops->gen_pdev_resume(ar);
418 if (IS_ERR(skb))
419 return PTR_ERR(skb);
420
421 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
422}
423
424static inline int
425ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
426{
427 struct sk_buff *skb;
428
429 if (!ar->wmi.ops->gen_pdev_set_param)
430 return -EOPNOTSUPP;
431
432 skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
433 if (IS_ERR(skb))
434 return PTR_ERR(skb);
435
436 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
437}
438
439static inline int
440ath10k_wmi_cmd_init(struct ath10k *ar)
441{
442 struct sk_buff *skb;
443
444 if (!ar->wmi.ops->gen_init)
445 return -EOPNOTSUPP;
446
447 skb = ar->wmi.ops->gen_init(ar);
448 if (IS_ERR(skb))
449 return PTR_ERR(skb);
450
451 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
452}
453
454static inline int
455ath10k_wmi_start_scan(struct ath10k *ar,
456 const struct wmi_start_scan_arg *arg)
457{
458 struct sk_buff *skb;
459
460 if (!ar->wmi.ops->gen_start_scan)
461 return -EOPNOTSUPP;
462
463 skb = ar->wmi.ops->gen_start_scan(ar, arg);
464 if (IS_ERR(skb))
465 return PTR_ERR(skb);
466
467 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
468}
469
470static inline int
471ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
472{
473 struct sk_buff *skb;
474
475 if (!ar->wmi.ops->gen_stop_scan)
476 return -EOPNOTSUPP;
477
478 skb = ar->wmi.ops->gen_stop_scan(ar, arg);
479 if (IS_ERR(skb))
480 return PTR_ERR(skb);
481
482 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
483}
484
485static inline int
486ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
487 enum wmi_vdev_type type,
488 enum wmi_vdev_subtype subtype,
489 const u8 macaddr[ETH_ALEN])
490{
491 struct sk_buff *skb;
492
493 if (!ar->wmi.ops->gen_vdev_create)
494 return -EOPNOTSUPP;
495
496 skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
497 if (IS_ERR(skb))
498 return PTR_ERR(skb);
499
500 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
501}
502
503static inline int
504ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
505{
506 struct sk_buff *skb;
507
508 if (!ar->wmi.ops->gen_vdev_delete)
509 return -EOPNOTSUPP;
510
511 skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
512 if (IS_ERR(skb))
513 return PTR_ERR(skb);
514
515 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
516}
517
518static inline int
519ath10k_wmi_vdev_start(struct ath10k *ar,
520 const struct wmi_vdev_start_request_arg *arg)
521{
522 struct sk_buff *skb;
523
524 if (!ar->wmi.ops->gen_vdev_start)
525 return -EOPNOTSUPP;
526
527 skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
528 if (IS_ERR(skb))
529 return PTR_ERR(skb);
530
531 return ath10k_wmi_cmd_send(ar, skb,
532 ar->wmi.cmd->vdev_start_request_cmdid);
533}
534
535static inline int
536ath10k_wmi_vdev_restart(struct ath10k *ar,
537 const struct wmi_vdev_start_request_arg *arg)
538{
539 struct sk_buff *skb;
540
541 if (!ar->wmi.ops->gen_vdev_start)
542 return -EOPNOTSUPP;
543
544 skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
545 if (IS_ERR(skb))
546 return PTR_ERR(skb);
547
548 return ath10k_wmi_cmd_send(ar, skb,
549 ar->wmi.cmd->vdev_restart_request_cmdid);
550}
551
552static inline int
553ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
554{
555 struct sk_buff *skb;
556
557 if (!ar->wmi.ops->gen_vdev_stop)
558 return -EOPNOTSUPP;
559
560 skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
561 if (IS_ERR(skb))
562 return PTR_ERR(skb);
563
564 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
565}
566
567static inline int
568ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
569{
570 struct sk_buff *skb;
571
572 if (!ar->wmi.ops->gen_vdev_up)
573 return -EOPNOTSUPP;
574
575 skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
576 if (IS_ERR(skb))
577 return PTR_ERR(skb);
578
579 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
580}
581
582static inline int
583ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
584{
585 struct sk_buff *skb;
586
587 if (!ar->wmi.ops->gen_vdev_down)
588 return -EOPNOTSUPP;
589
590 skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
591 if (IS_ERR(skb))
592 return PTR_ERR(skb);
593
594 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
595}
596
597static inline int
598ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
599 u32 param_value)
600{
601 struct sk_buff *skb;
602
603 if (!ar->wmi.ops->gen_vdev_set_param)
604 return -EOPNOTSUPP;
605
606 skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
607 param_value);
608 if (IS_ERR(skb))
609 return PTR_ERR(skb);
610
611 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
612}
613
614static inline int
615ath10k_wmi_vdev_install_key(struct ath10k *ar,
616 const struct wmi_vdev_install_key_arg *arg)
617{
618 struct sk_buff *skb;
619
620 if (!ar->wmi.ops->gen_vdev_install_key)
621 return -EOPNOTSUPP;
622
623 skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
624 if (IS_ERR(skb))
625 return PTR_ERR(skb);
626
627 return ath10k_wmi_cmd_send(ar, skb,
628 ar->wmi.cmd->vdev_install_key_cmdid);
629}
630
631static inline int
632ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
633 const struct wmi_vdev_spectral_conf_arg *arg)
634{
635 struct sk_buff *skb;
636 u32 cmd_id;
637
638 skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
639 if (IS_ERR(skb))
640 return PTR_ERR(skb);
641
642 cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
643 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
644}
645
646static inline int
647ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
648 u32 enable)
649{
650 struct sk_buff *skb;
651 u32 cmd_id;
652
653 skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
654 enable);
655 if (IS_ERR(skb))
656 return PTR_ERR(skb);
657
658 cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
659 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
660}
661
662static inline int
Janusz Dziedzic0c7e4772015-01-24 12:14:52 +0200663ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
664 const u8 peer_addr[ETH_ALEN],
665 const struct wmi_sta_uapsd_auto_trig_arg *args,
666 u32 num_ac)
667{
668 struct sk_buff *skb;
669 u32 cmd_id;
670
671 if (!ar->wmi.ops->gen_vdev_sta_uapsd)
672 return -EOPNOTSUPP;
673
674 skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
675 num_ac);
676 if (IS_ERR(skb))
677 return PTR_ERR(skb);
678
679 cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
680 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
681}
682
683static inline int
Michal Kazior6d492fe2015-01-28 09:57:22 +0200684ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
685 const struct wmi_wmm_params_all_arg *arg)
686{
687 struct sk_buff *skb;
688 u32 cmd_id;
689
690 skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
691 if (IS_ERR(skb))
692 return PTR_ERR(skb);
693
694 cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
695 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
696}
697
698static inline int
Michal Kaziord7579d12014-12-03 10:10:54 +0200699ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
Marek Puzyniak7390ed32015-03-30 09:51:52 +0300700 const u8 peer_addr[ETH_ALEN],
701 enum wmi_peer_type peer_type)
Michal Kaziord7579d12014-12-03 10:10:54 +0200702{
703 struct sk_buff *skb;
704
705 if (!ar->wmi.ops->gen_peer_create)
706 return -EOPNOTSUPP;
707
Marek Puzyniak7390ed32015-03-30 09:51:52 +0300708 skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
Michal Kaziord7579d12014-12-03 10:10:54 +0200709 if (IS_ERR(skb))
710 return PTR_ERR(skb);
711
712 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
713}
714
715static inline int
716ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
717 const u8 peer_addr[ETH_ALEN])
718{
719 struct sk_buff *skb;
720
721 if (!ar->wmi.ops->gen_peer_delete)
722 return -EOPNOTSUPP;
723
724 skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
725 if (IS_ERR(skb))
726 return PTR_ERR(skb);
727
728 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
729}
730
731static inline int
732ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
733 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
734{
735 struct sk_buff *skb;
736
737 if (!ar->wmi.ops->gen_peer_flush)
738 return -EOPNOTSUPP;
739
740 skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
741 if (IS_ERR(skb))
742 return PTR_ERR(skb);
743
744 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
745}
746
747static inline int
748ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
749 enum wmi_peer_param param_id, u32 param_value)
750{
751 struct sk_buff *skb;
752
753 if (!ar->wmi.ops->gen_peer_set_param)
754 return -EOPNOTSUPP;
755
756 skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
757 param_value);
758 if (IS_ERR(skb))
759 return PTR_ERR(skb);
760
761 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
762}
763
764static inline int
765ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
766 enum wmi_sta_ps_mode psmode)
767{
768 struct sk_buff *skb;
769
770 if (!ar->wmi.ops->gen_set_psmode)
771 return -EOPNOTSUPP;
772
773 skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
774 if (IS_ERR(skb))
775 return PTR_ERR(skb);
776
777 return ath10k_wmi_cmd_send(ar, skb,
778 ar->wmi.cmd->sta_powersave_mode_cmdid);
779}
780
781static inline int
782ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
783 enum wmi_sta_powersave_param param_id, u32 value)
784{
785 struct sk_buff *skb;
786
787 if (!ar->wmi.ops->gen_set_sta_ps)
788 return -EOPNOTSUPP;
789
790 skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
791 if (IS_ERR(skb))
792 return PTR_ERR(skb);
793
794 return ath10k_wmi_cmd_send(ar, skb,
795 ar->wmi.cmd->sta_powersave_param_cmdid);
796}
797
798static inline int
799ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
800 enum wmi_ap_ps_peer_param param_id, u32 value)
801{
802 struct sk_buff *skb;
803
804 if (!ar->wmi.ops->gen_set_ap_ps)
805 return -EOPNOTSUPP;
806
807 skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
808 if (IS_ERR(skb))
809 return PTR_ERR(skb);
810
811 return ath10k_wmi_cmd_send(ar, skb,
812 ar->wmi.cmd->ap_ps_peer_param_cmdid);
813}
814
815static inline int
816ath10k_wmi_scan_chan_list(struct ath10k *ar,
817 const struct wmi_scan_chan_list_arg *arg)
818{
819 struct sk_buff *skb;
820
821 if (!ar->wmi.ops->gen_scan_chan_list)
822 return -EOPNOTSUPP;
823
824 skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
825 if (IS_ERR(skb))
826 return PTR_ERR(skb);
827
828 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
829}
830
831static inline int
832ath10k_wmi_peer_assoc(struct ath10k *ar,
833 const struct wmi_peer_assoc_complete_arg *arg)
834{
835 struct sk_buff *skb;
836
837 if (!ar->wmi.ops->gen_peer_assoc)
838 return -EOPNOTSUPP;
839
840 skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
841 if (IS_ERR(skb))
842 return PTR_ERR(skb);
843
844 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
845}
846
847static inline int
Michal Kazior9ad50182015-01-29 14:29:47 +0200848ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
849 const void *bcn, size_t bcn_len,
850 u32 bcn_paddr, bool dtim_zero,
851 bool deliver_cab)
Michal Kaziord7579d12014-12-03 10:10:54 +0200852{
Michal Kaziord7579d12014-12-03 10:10:54 +0200853 struct sk_buff *skb;
854 int ret;
855
856 if (!ar->wmi.ops->gen_beacon_dma)
857 return -EOPNOTSUPP;
858
Michal Kazior9ad50182015-01-29 14:29:47 +0200859 skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
860 dtim_zero, deliver_cab);
Michal Kaziord7579d12014-12-03 10:10:54 +0200861 if (IS_ERR(skb))
862 return PTR_ERR(skb);
863
864 ret = ath10k_wmi_cmd_send_nowait(ar, skb,
865 ar->wmi.cmd->pdev_send_bcn_cmdid);
866 if (ret) {
867 dev_kfree_skb(skb);
868 return ret;
869 }
870
871 return 0;
872}
873
874static inline int
875ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
Michal Kazior5e752e42015-01-19 09:53:41 +0100876 const struct wmi_wmm_params_all_arg *arg)
Michal Kaziord7579d12014-12-03 10:10:54 +0200877{
878 struct sk_buff *skb;
879
880 if (!ar->wmi.ops->gen_pdev_set_wmm)
881 return -EOPNOTSUPP;
882
883 skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
884 if (IS_ERR(skb))
885 return PTR_ERR(skb);
886
887 return ath10k_wmi_cmd_send(ar, skb,
888 ar->wmi.cmd->pdev_set_wmm_params_cmdid);
889}
890
891static inline int
Michal Kaziorde23d3e2015-02-15 16:50:41 +0200892ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
Michal Kaziord7579d12014-12-03 10:10:54 +0200893{
894 struct sk_buff *skb;
895
896 if (!ar->wmi.ops->gen_request_stats)
897 return -EOPNOTSUPP;
898
Michal Kaziorde23d3e2015-02-15 16:50:41 +0200899 skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
Michal Kaziord7579d12014-12-03 10:10:54 +0200900 if (IS_ERR(skb))
901 return PTR_ERR(skb);
902
903 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
904}
905
906static inline int
907ath10k_wmi_force_fw_hang(struct ath10k *ar,
908 enum wmi_force_fw_hang_type type, u32 delay_ms)
909{
910 struct sk_buff *skb;
911
912 if (!ar->wmi.ops->gen_force_fw_hang)
913 return -EOPNOTSUPP;
914
915 skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
916 if (IS_ERR(skb))
917 return PTR_ERR(skb);
918
919 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
920}
921
922static inline int
SenthilKumar Jegadeesan467210a2015-01-29 14:36:52 +0530923ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
Michal Kaziord7579d12014-12-03 10:10:54 +0200924{
925 struct sk_buff *skb;
926
927 if (!ar->wmi.ops->gen_dbglog_cfg)
928 return -EOPNOTSUPP;
929
SenthilKumar Jegadeesan467210a2015-01-29 14:36:52 +0530930 skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
Michal Kaziord7579d12014-12-03 10:10:54 +0200931 if (IS_ERR(skb))
932 return PTR_ERR(skb);
933
934 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
935}
936
937static inline int
938ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
939{
940 struct sk_buff *skb;
941
942 if (!ar->wmi.ops->gen_pktlog_enable)
943 return -EOPNOTSUPP;
944
945 skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
946 if (IS_ERR(skb))
947 return PTR_ERR(skb);
948
949 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
950}
951
952static inline int
953ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
954{
955 struct sk_buff *skb;
956
957 if (!ar->wmi.ops->gen_pktlog_disable)
958 return -EOPNOTSUPP;
959
960 skb = ar->wmi.ops->gen_pktlog_disable(ar);
961 if (IS_ERR(skb))
962 return PTR_ERR(skb);
963
964 return ath10k_wmi_cmd_send(ar, skb,
965 ar->wmi.cmd->pdev_pktlog_disable_cmdid);
966}
967
Rajkumar Manoharanffdd7382014-12-17 12:21:40 +0200968static inline int
969ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
970 u32 next_offset, u32 enabled)
971{
972 struct sk_buff *skb;
973
974 if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
975 return -EOPNOTSUPP;
976
977 skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
978 next_offset, enabled);
979 if (IS_ERR(skb))
980 return PTR_ERR(skb);
981
982 return ath10k_wmi_cmd_send(ar, skb,
983 ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
984}
985
Rajkumar Manoharana57a6a22014-12-17 12:22:17 +0200986static inline int
987ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
988{
989 struct sk_buff *skb;
990
991 if (!ar->wmi.ops->gen_pdev_get_temperature)
992 return -EOPNOTSUPP;
993
994 skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
995 if (IS_ERR(skb))
996 return PTR_ERR(skb);
997
998 return ath10k_wmi_cmd_send(ar, skb,
999 ar->wmi.cmd->pdev_get_temperature_cmdid);
1000}
1001
Rajkumar Manoharandc8ab272015-01-12 14:07:25 +02001002static inline int
1003ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
1004{
1005 struct sk_buff *skb;
1006
1007 if (!ar->wmi.ops->gen_addba_clear_resp)
1008 return -EOPNOTSUPP;
1009
1010 skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
1011 if (IS_ERR(skb))
1012 return PTR_ERR(skb);
1013
1014 return ath10k_wmi_cmd_send(ar, skb,
1015 ar->wmi.cmd->addba_clear_resp_cmdid);
1016}
1017
Rajkumar Manoharan65c08932015-01-12 14:07:26 +02001018static inline int
1019ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1020 u32 tid, u32 buf_size)
1021{
1022 struct sk_buff *skb;
1023
1024 if (!ar->wmi.ops->gen_addba_send)
1025 return -EOPNOTSUPP;
1026
1027 skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
1028 if (IS_ERR(skb))
1029 return PTR_ERR(skb);
1030
1031 return ath10k_wmi_cmd_send(ar, skb,
1032 ar->wmi.cmd->addba_send_cmdid);
1033}
1034
Rajkumar Manoharan11597412015-01-12 14:07:26 +02001035static inline int
1036ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1037 u32 tid, u32 status)
1038{
1039 struct sk_buff *skb;
1040
1041 if (!ar->wmi.ops->gen_addba_set_resp)
1042 return -EOPNOTSUPP;
1043
1044 skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
1045 if (IS_ERR(skb))
1046 return PTR_ERR(skb);
1047
1048 return ath10k_wmi_cmd_send(ar, skb,
1049 ar->wmi.cmd->addba_set_resp_cmdid);
1050}
1051
Rajkumar Manoharan50abef82015-01-12 14:07:26 +02001052static inline int
1053ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1054 u32 tid, u32 initiator, u32 reason)
1055{
1056 struct sk_buff *skb;
1057
1058 if (!ar->wmi.ops->gen_delba_send)
1059 return -EOPNOTSUPP;
1060
1061 skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
1062 reason);
1063 if (IS_ERR(skb))
1064 return PTR_ERR(skb);
1065
1066 return ath10k_wmi_cmd_send(ar, skb,
1067 ar->wmi.cmd->delba_send_cmdid);
1068}
1069
Michal Kaziorbe9ce9d2015-01-13 16:30:11 +02001070static inline int
1071ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
1072 struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
1073 void *prb_ies, size_t prb_ies_len)
1074{
1075 struct sk_buff *skb;
1076
1077 if (!ar->wmi.ops->gen_bcn_tmpl)
1078 return -EOPNOTSUPP;
1079
1080 skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
1081 prb_caps, prb_erp, prb_ies,
1082 prb_ies_len);
1083 if (IS_ERR(skb))
1084 return PTR_ERR(skb);
1085
1086 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
1087}
1088
Michal Kazior4c4955f2015-01-13 16:30:11 +02001089static inline int
1090ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
1091{
1092 struct sk_buff *skb;
1093
1094 if (!ar->wmi.ops->gen_prb_tmpl)
1095 return -EOPNOTSUPP;
1096
1097 skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
1098 if (IS_ERR(skb))
1099 return PTR_ERR(skb);
1100
1101 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
1102}
1103
Michal Kazior369242b4e2015-01-13 16:30:11 +02001104static inline int
1105ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
1106{
1107 struct sk_buff *skb;
1108
1109 if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
1110 return -EOPNOTSUPP;
1111
1112 skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
1113 if (IS_ERR(skb))
1114 return PTR_ERR(skb);
1115
1116 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
1117}
1118
Janusz Dziedzic6e8b1882015-01-28 09:57:39 +02001119static inline int
1120ath10k_wmi_sta_keepalive(struct ath10k *ar,
1121 const struct wmi_sta_keepalive_arg *arg)
1122{
1123 struct sk_buff *skb;
1124 u32 cmd_id;
1125
1126 if (!ar->wmi.ops->gen_sta_keepalive)
1127 return -EOPNOTSUPP;
1128
1129 skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
1130 if (IS_ERR(skb))
1131 return PTR_ERR(skb);
1132
1133 cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
1134 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1135}
1136
Janusz Dziedzicf5431e82015-03-23 17:32:53 +02001137static inline int
1138ath10k_wmi_wow_enable(struct ath10k *ar)
1139{
1140 struct sk_buff *skb;
1141 u32 cmd_id;
1142
1143 if (!ar->wmi.ops->gen_wow_enable)
1144 return -EOPNOTSUPP;
1145
1146 skb = ar->wmi.ops->gen_wow_enable(ar);
1147 if (IS_ERR(skb))
1148 return PTR_ERR(skb);
1149
1150 cmd_id = ar->wmi.cmd->wow_enable_cmdid;
1151 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1152}
1153
1154static inline int
1155ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
1156 enum wmi_wow_wakeup_event event,
1157 u32 enable)
1158{
1159 struct sk_buff *skb;
1160 u32 cmd_id;
1161
1162 if (!ar->wmi.ops->gen_wow_add_wakeup_event)
1163 return -EOPNOTSUPP;
1164
1165 skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
1166 if (IS_ERR(skb))
1167 return PTR_ERR(skb);
1168
1169 cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
1170 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1171}
1172
1173static inline int
1174ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
1175{
1176 struct sk_buff *skb;
1177 u32 cmd_id;
1178
1179 if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
1180 return -EOPNOTSUPP;
1181
1182 skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
1183 if (IS_ERR(skb))
1184 return PTR_ERR(skb);
1185
1186 cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
1187 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1188}
1189
Janusz Dziedzicd4976102015-03-23 17:32:54 +02001190static inline int
1191ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
1192 const u8 *pattern, const u8 *mask,
1193 int pattern_len, int pattern_offset)
1194{
1195 struct sk_buff *skb;
1196 u32 cmd_id;
1197
1198 if (!ar->wmi.ops->gen_wow_add_pattern)
1199 return -EOPNOTSUPP;
1200
1201 skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
1202 pattern, mask, pattern_len,
1203 pattern_offset);
1204 if (IS_ERR(skb))
1205 return PTR_ERR(skb);
1206
1207 cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
1208 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1209}
1210
1211static inline int
1212ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
1213{
1214 struct sk_buff *skb;
1215 u32 cmd_id;
1216
1217 if (!ar->wmi.ops->gen_wow_del_pattern)
1218 return -EOPNOTSUPP;
1219
1220 skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
1221 if (IS_ERR(skb))
1222 return PTR_ERR(skb);
1223
1224 cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
1225 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1226}
Marek Puzyniakad45c882015-03-30 09:51:53 +03001227
1228static inline int
1229ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
1230 enum wmi_tdls_state state)
1231{
1232 struct sk_buff *skb;
1233
1234 if (!ar->wmi.ops->gen_update_fw_tdls_state)
1235 return -EOPNOTSUPP;
1236
1237 skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
1238 if (IS_ERR(skb))
1239 return PTR_ERR(skb);
1240
1241 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
1242}
1243
1244static inline int
1245ath10k_wmi_tdls_peer_update(struct ath10k *ar,
1246 const struct wmi_tdls_peer_update_cmd_arg *arg,
1247 const struct wmi_tdls_peer_capab_arg *cap,
1248 const struct wmi_channel_arg *chan)
1249{
1250 struct sk_buff *skb;
1251
1252 if (!ar->wmi.ops->gen_tdls_peer_update)
1253 return -EOPNOTSUPP;
1254
1255 skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
1256 if (IS_ERR(skb))
1257 return PTR_ERR(skb);
1258
1259 return ath10k_wmi_cmd_send(ar, skb,
1260 ar->wmi.cmd->tdls_peer_update_cmdid);
1261}
1262
Michal Kazior5b272e32015-03-31 10:26:22 +00001263static inline int
1264ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
1265{
1266 struct sk_buff *skb;
1267
1268 if (!ar->wmi.ops->gen_adaptive_qcs)
1269 return -EOPNOTSUPP;
1270
1271 skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
1272 if (IS_ERR(skb))
1273 return PTR_ERR(skb);
1274
1275 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
1276}
1277
Maharaja Kennadyrajan29542662015-10-05 17:56:38 +03001278static inline int
1279ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
1280{
1281 struct sk_buff *skb;
1282
1283 if (!ar->wmi.ops->gen_pdev_get_tpc_config)
1284 return -EOPNOTSUPP;
1285
1286 skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
1287
1288 if (IS_ERR(skb))
1289 return PTR_ERR(skb);
1290
1291 return ath10k_wmi_cmd_send(ar, skb,
1292 ar->wmi.cmd->pdev_get_tpc_config_cmdid);
1293}
1294
Manikanta Pubbisettybc6f9ae2015-10-16 15:54:52 +03001295static inline int
1296ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
1297 char *buf)
1298{
1299 if (!ar->wmi.ops->fw_stats_fill)
1300 return -EOPNOTSUPP;
1301
1302 ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
1303 return 0;
1304}
Michal Kaziord7579d12014-12-03 10:10:54 +02001305#endif