blob: 058f88b6ff53f87459a049a4ee9c2fbab1c57d05 [file] [log] [blame]
Michal Kaziord7579d12014-12-03 10:10:54 +02001/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#ifndef _WMI_OPS_H_
19#define _WMI_OPS_H_
20
21struct ath10k;
22struct sk_buff;
23
/* Per-firmware-ABI dispatch table for WMI.
 *
 * Each supported WMI ABI variant fills in this table; the inline
 * wrappers below (ath10k_wmi_*) call through ar->wmi.ops and return
 * -EOPNOTSUPP when an op is left NULL.
 *
 * pull_* ops parse a received event skb into a host arg struct.
 * gen_* ops allocate and build a command skb (returned via ERR_PTR on
 * failure); the wrappers then transmit it with ath10k_wmi_cmd_send().
 */
struct wmi_ops {
	/* Event delivery and service bitmap translation. */
	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);

	/* Event parsers: skb payload -> host-order arg struct. */
	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_scan_ev_arg *arg);
	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_mgmt_rx_ev_arg *arg);
	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_ch_info_ev_arg *arg);
	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_vdev_start_ev_arg *arg);
	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_peer_kick_ev_arg *arg);
	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_swba_ev_arg *arg);
	int (*pull_phyerr)(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_phyerr_ev_arg *arg);
	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_svc_rdy_ev_arg *arg);
	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_rdy_ev_arg *arg);
	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
			     struct ath10k_fw_stats *stats);

	/* Command builders: return a ready-to-send skb or ERR_PTR(). */
	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
					   u16 rd5g, u16 ctl2g, u16 ctl5g,
					   enum wmi_dfs_region dfs_reg);
	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
					      u32 value);
	struct sk_buff *(*gen_init)(struct ath10k *ar);
	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
					  const struct wmi_start_scan_arg *arg);
	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
					 const struct wmi_stop_scan_arg *arg);
	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
					   enum wmi_vdev_type type,
					   enum wmi_vdev_subtype subtype,
					   const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
					  const struct wmi_vdev_start_request_arg *arg,
					  bool restart);
	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
				       const u8 *bssid);
	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
					      u32 param_id, u32 param_value);
	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
						const struct wmi_vdev_install_key_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
						  const struct wmi_vdev_spectral_conf_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
						    u32 trigger, u32 enable);
	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
					     const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
					  const u8 peer_addr[ETH_ALEN],
					  u32 tid_bitmap);
	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
					      const u8 *peer_addr,
					      enum wmi_peer_param param_id,
					      u32 param_value);
	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
					  const struct wmi_peer_assoc_complete_arg *arg);
	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_ps_mode psmode);
	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_powersave_param param_id,
					  u32 value);
	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
					 const u8 *mac,
					 enum wmi_ap_ps_peer_param param_id,
					 u32 value);
	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
					      const struct wmi_scan_chan_list_arg *arg);
	struct sk_buff *(*gen_beacon_dma)(struct ath10k_vif *arvif);
	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
					    const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_request_stats)(struct ath10k *ar,
					     enum wmi_stats_id stats_id);
	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
					     enum wmi_force_fw_hang_type type,
					     u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
						   u32 period, u32 duration,
						   u32 next_offset,
						   u32 enabled);
	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
						const u8 *mac);
	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 buf_size);
	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
					      const u8 *mac, u32 tid,
					      u32 status);
	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 initiator,
					  u32 reason);
	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
					u32 tim_ie_offset, struct sk_buff *bcn,
					u32 prb_caps, u32 prb_erp,
					void *prb_ies, size_t prb_ies_len);
	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
					struct sk_buff *bcn);
	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
					     const u8 *p2p_ie);
	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
					      const u8 peer_addr[ETH_ALEN],
					      const struct wmi_sta_uapsd_auto_trig_arg *args,
					      u32 num_ac);
	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
					     const struct wmi_sta_keepalive_arg *arg);
};
149
150int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
151
152static inline int
153ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
154{
155 if (WARN_ON_ONCE(!ar->wmi.ops->rx))
156 return -EOPNOTSUPP;
157
158 ar->wmi.ops->rx(ar, skb);
159 return 0;
160}
161
162static inline int
163ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
164 size_t len)
165{
166 if (!ar->wmi.ops->map_svc)
167 return -EOPNOTSUPP;
168
169 ar->wmi.ops->map_svc(in, out, len);
170 return 0;
171}
172
173static inline int
174ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
175 struct wmi_scan_ev_arg *arg)
176{
177 if (!ar->wmi.ops->pull_scan)
178 return -EOPNOTSUPP;
179
180 return ar->wmi.ops->pull_scan(ar, skb, arg);
181}
182
183static inline int
184ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
185 struct wmi_mgmt_rx_ev_arg *arg)
186{
187 if (!ar->wmi.ops->pull_mgmt_rx)
188 return -EOPNOTSUPP;
189
190 return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
191}
192
193static inline int
194ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
195 struct wmi_ch_info_ev_arg *arg)
196{
197 if (!ar->wmi.ops->pull_ch_info)
198 return -EOPNOTSUPP;
199
200 return ar->wmi.ops->pull_ch_info(ar, skb, arg);
201}
202
203static inline int
204ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
205 struct wmi_vdev_start_ev_arg *arg)
206{
207 if (!ar->wmi.ops->pull_vdev_start)
208 return -EOPNOTSUPP;
209
210 return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
211}
212
213static inline int
214ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
215 struct wmi_peer_kick_ev_arg *arg)
216{
217 if (!ar->wmi.ops->pull_peer_kick)
218 return -EOPNOTSUPP;
219
220 return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
221}
222
223static inline int
224ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
225 struct wmi_swba_ev_arg *arg)
226{
227 if (!ar->wmi.ops->pull_swba)
228 return -EOPNOTSUPP;
229
230 return ar->wmi.ops->pull_swba(ar, skb, arg);
231}
232
233static inline int
234ath10k_wmi_pull_phyerr(struct ath10k *ar, struct sk_buff *skb,
235 struct wmi_phyerr_ev_arg *arg)
236{
237 if (!ar->wmi.ops->pull_phyerr)
238 return -EOPNOTSUPP;
239
240 return ar->wmi.ops->pull_phyerr(ar, skb, arg);
241}
242
243static inline int
244ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
245 struct wmi_svc_rdy_ev_arg *arg)
246{
247 if (!ar->wmi.ops->pull_svc_rdy)
248 return -EOPNOTSUPP;
249
250 return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
251}
252
253static inline int
254ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
255 struct wmi_rdy_ev_arg *arg)
256{
257 if (!ar->wmi.ops->pull_rdy)
258 return -EOPNOTSUPP;
259
260 return ar->wmi.ops->pull_rdy(ar, skb, arg);
261}
262
263static inline int
264ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
265 struct ath10k_fw_stats *stats)
266{
267 if (!ar->wmi.ops->pull_fw_stats)
268 return -EOPNOTSUPP;
269
270 return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
271}
272
273static inline int
274ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
275{
276 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
277 struct sk_buff *skb;
278 int ret;
279
280 if (!ar->wmi.ops->gen_mgmt_tx)
281 return -EOPNOTSUPP;
282
283 skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
284 if (IS_ERR(skb))
285 return PTR_ERR(skb);
286
287 ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
288 if (ret)
289 return ret;
290
291 /* FIXME There's no ACK event for Management Tx. This probably
292 * shouldn't be called here either. */
293 info->flags |= IEEE80211_TX_STAT_ACK;
294 ieee80211_tx_status_irqsafe(ar->hw, msdu);
295
296 return 0;
297}
298
299static inline int
300ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
301 u16 ctl2g, u16 ctl5g,
302 enum wmi_dfs_region dfs_reg)
303{
304 struct sk_buff *skb;
305
306 if (!ar->wmi.ops->gen_pdev_set_rd)
307 return -EOPNOTSUPP;
308
309 skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
310 dfs_reg);
311 if (IS_ERR(skb))
312 return PTR_ERR(skb);
313
314 return ath10k_wmi_cmd_send(ar, skb,
315 ar->wmi.cmd->pdev_set_regdomain_cmdid);
316}
317
318static inline int
319ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
320{
321 struct sk_buff *skb;
322
323 if (!ar->wmi.ops->gen_pdev_suspend)
324 return -EOPNOTSUPP;
325
326 skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
327 if (IS_ERR(skb))
328 return PTR_ERR(skb);
329
330 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
331}
332
333static inline int
334ath10k_wmi_pdev_resume_target(struct ath10k *ar)
335{
336 struct sk_buff *skb;
337
338 if (!ar->wmi.ops->gen_pdev_resume)
339 return -EOPNOTSUPP;
340
341 skb = ar->wmi.ops->gen_pdev_resume(ar);
342 if (IS_ERR(skb))
343 return PTR_ERR(skb);
344
345 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
346}
347
348static inline int
349ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
350{
351 struct sk_buff *skb;
352
353 if (!ar->wmi.ops->gen_pdev_set_param)
354 return -EOPNOTSUPP;
355
356 skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
357 if (IS_ERR(skb))
358 return PTR_ERR(skb);
359
360 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
361}
362
363static inline int
364ath10k_wmi_cmd_init(struct ath10k *ar)
365{
366 struct sk_buff *skb;
367
368 if (!ar->wmi.ops->gen_init)
369 return -EOPNOTSUPP;
370
371 skb = ar->wmi.ops->gen_init(ar);
372 if (IS_ERR(skb))
373 return PTR_ERR(skb);
374
375 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
376}
377
378static inline int
379ath10k_wmi_start_scan(struct ath10k *ar,
380 const struct wmi_start_scan_arg *arg)
381{
382 struct sk_buff *skb;
383
384 if (!ar->wmi.ops->gen_start_scan)
385 return -EOPNOTSUPP;
386
387 skb = ar->wmi.ops->gen_start_scan(ar, arg);
388 if (IS_ERR(skb))
389 return PTR_ERR(skb);
390
391 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
392}
393
394static inline int
395ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
396{
397 struct sk_buff *skb;
398
399 if (!ar->wmi.ops->gen_stop_scan)
400 return -EOPNOTSUPP;
401
402 skb = ar->wmi.ops->gen_stop_scan(ar, arg);
403 if (IS_ERR(skb))
404 return PTR_ERR(skb);
405
406 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
407}
408
409static inline int
410ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
411 enum wmi_vdev_type type,
412 enum wmi_vdev_subtype subtype,
413 const u8 macaddr[ETH_ALEN])
414{
415 struct sk_buff *skb;
416
417 if (!ar->wmi.ops->gen_vdev_create)
418 return -EOPNOTSUPP;
419
420 skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
421 if (IS_ERR(skb))
422 return PTR_ERR(skb);
423
424 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
425}
426
427static inline int
428ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
429{
430 struct sk_buff *skb;
431
432 if (!ar->wmi.ops->gen_vdev_delete)
433 return -EOPNOTSUPP;
434
435 skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
436 if (IS_ERR(skb))
437 return PTR_ERR(skb);
438
439 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
440}
441
442static inline int
443ath10k_wmi_vdev_start(struct ath10k *ar,
444 const struct wmi_vdev_start_request_arg *arg)
445{
446 struct sk_buff *skb;
447
448 if (!ar->wmi.ops->gen_vdev_start)
449 return -EOPNOTSUPP;
450
451 skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
452 if (IS_ERR(skb))
453 return PTR_ERR(skb);
454
455 return ath10k_wmi_cmd_send(ar, skb,
456 ar->wmi.cmd->vdev_start_request_cmdid);
457}
458
459static inline int
460ath10k_wmi_vdev_restart(struct ath10k *ar,
461 const struct wmi_vdev_start_request_arg *arg)
462{
463 struct sk_buff *skb;
464
465 if (!ar->wmi.ops->gen_vdev_start)
466 return -EOPNOTSUPP;
467
468 skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
469 if (IS_ERR(skb))
470 return PTR_ERR(skb);
471
472 return ath10k_wmi_cmd_send(ar, skb,
473 ar->wmi.cmd->vdev_restart_request_cmdid);
474}
475
476static inline int
477ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
478{
479 struct sk_buff *skb;
480
481 if (!ar->wmi.ops->gen_vdev_stop)
482 return -EOPNOTSUPP;
483
484 skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
485 if (IS_ERR(skb))
486 return PTR_ERR(skb);
487
488 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
489}
490
491static inline int
492ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
493{
494 struct sk_buff *skb;
495
496 if (!ar->wmi.ops->gen_vdev_up)
497 return -EOPNOTSUPP;
498
499 skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
500 if (IS_ERR(skb))
501 return PTR_ERR(skb);
502
503 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
504}
505
506static inline int
507ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
508{
509 struct sk_buff *skb;
510
511 if (!ar->wmi.ops->gen_vdev_down)
512 return -EOPNOTSUPP;
513
514 skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
515 if (IS_ERR(skb))
516 return PTR_ERR(skb);
517
518 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
519}
520
521static inline int
522ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
523 u32 param_value)
524{
525 struct sk_buff *skb;
526
527 if (!ar->wmi.ops->gen_vdev_set_param)
528 return -EOPNOTSUPP;
529
530 skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
531 param_value);
532 if (IS_ERR(skb))
533 return PTR_ERR(skb);
534
535 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
536}
537
538static inline int
539ath10k_wmi_vdev_install_key(struct ath10k *ar,
540 const struct wmi_vdev_install_key_arg *arg)
541{
542 struct sk_buff *skb;
543
544 if (!ar->wmi.ops->gen_vdev_install_key)
545 return -EOPNOTSUPP;
546
547 skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
548 if (IS_ERR(skb))
549 return PTR_ERR(skb);
550
551 return ath10k_wmi_cmd_send(ar, skb,
552 ar->wmi.cmd->vdev_install_key_cmdid);
553}
554
555static inline int
556ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
557 const struct wmi_vdev_spectral_conf_arg *arg)
558{
559 struct sk_buff *skb;
560 u32 cmd_id;
561
562 skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
563 if (IS_ERR(skb))
564 return PTR_ERR(skb);
565
566 cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
567 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
568}
569
570static inline int
571ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
572 u32 enable)
573{
574 struct sk_buff *skb;
575 u32 cmd_id;
576
577 skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
578 enable);
579 if (IS_ERR(skb))
580 return PTR_ERR(skb);
581
582 cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
583 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
584}
585
586static inline int
Janusz Dziedzic0c7e4772015-01-24 12:14:52 +0200587ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
588 const u8 peer_addr[ETH_ALEN],
589 const struct wmi_sta_uapsd_auto_trig_arg *args,
590 u32 num_ac)
591{
592 struct sk_buff *skb;
593 u32 cmd_id;
594
595 if (!ar->wmi.ops->gen_vdev_sta_uapsd)
596 return -EOPNOTSUPP;
597
598 skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
599 num_ac);
600 if (IS_ERR(skb))
601 return PTR_ERR(skb);
602
603 cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
604 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
605}
606
607static inline int
Michal Kazior6d492fe2015-01-28 09:57:22 +0200608ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
609 const struct wmi_wmm_params_all_arg *arg)
610{
611 struct sk_buff *skb;
612 u32 cmd_id;
613
614 skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
615 if (IS_ERR(skb))
616 return PTR_ERR(skb);
617
618 cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
619 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
620}
621
622static inline int
Michal Kaziord7579d12014-12-03 10:10:54 +0200623ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
624 const u8 peer_addr[ETH_ALEN])
625{
626 struct sk_buff *skb;
627
628 if (!ar->wmi.ops->gen_peer_create)
629 return -EOPNOTSUPP;
630
631 skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr);
632 if (IS_ERR(skb))
633 return PTR_ERR(skb);
634
635 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
636}
637
638static inline int
639ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
640 const u8 peer_addr[ETH_ALEN])
641{
642 struct sk_buff *skb;
643
644 if (!ar->wmi.ops->gen_peer_delete)
645 return -EOPNOTSUPP;
646
647 skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
648 if (IS_ERR(skb))
649 return PTR_ERR(skb);
650
651 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
652}
653
654static inline int
655ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
656 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
657{
658 struct sk_buff *skb;
659
660 if (!ar->wmi.ops->gen_peer_flush)
661 return -EOPNOTSUPP;
662
663 skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
664 if (IS_ERR(skb))
665 return PTR_ERR(skb);
666
667 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
668}
669
670static inline int
671ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
672 enum wmi_peer_param param_id, u32 param_value)
673{
674 struct sk_buff *skb;
675
676 if (!ar->wmi.ops->gen_peer_set_param)
677 return -EOPNOTSUPP;
678
679 skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
680 param_value);
681 if (IS_ERR(skb))
682 return PTR_ERR(skb);
683
684 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
685}
686
687static inline int
688ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
689 enum wmi_sta_ps_mode psmode)
690{
691 struct sk_buff *skb;
692
693 if (!ar->wmi.ops->gen_set_psmode)
694 return -EOPNOTSUPP;
695
696 skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
697 if (IS_ERR(skb))
698 return PTR_ERR(skb);
699
700 return ath10k_wmi_cmd_send(ar, skb,
701 ar->wmi.cmd->sta_powersave_mode_cmdid);
702}
703
704static inline int
705ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
706 enum wmi_sta_powersave_param param_id, u32 value)
707{
708 struct sk_buff *skb;
709
710 if (!ar->wmi.ops->gen_set_sta_ps)
711 return -EOPNOTSUPP;
712
713 skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
714 if (IS_ERR(skb))
715 return PTR_ERR(skb);
716
717 return ath10k_wmi_cmd_send(ar, skb,
718 ar->wmi.cmd->sta_powersave_param_cmdid);
719}
720
721static inline int
722ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
723 enum wmi_ap_ps_peer_param param_id, u32 value)
724{
725 struct sk_buff *skb;
726
727 if (!ar->wmi.ops->gen_set_ap_ps)
728 return -EOPNOTSUPP;
729
730 skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
731 if (IS_ERR(skb))
732 return PTR_ERR(skb);
733
734 return ath10k_wmi_cmd_send(ar, skb,
735 ar->wmi.cmd->ap_ps_peer_param_cmdid);
736}
737
738static inline int
739ath10k_wmi_scan_chan_list(struct ath10k *ar,
740 const struct wmi_scan_chan_list_arg *arg)
741{
742 struct sk_buff *skb;
743
744 if (!ar->wmi.ops->gen_scan_chan_list)
745 return -EOPNOTSUPP;
746
747 skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
748 if (IS_ERR(skb))
749 return PTR_ERR(skb);
750
751 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
752}
753
754static inline int
755ath10k_wmi_peer_assoc(struct ath10k *ar,
756 const struct wmi_peer_assoc_complete_arg *arg)
757{
758 struct sk_buff *skb;
759
760 if (!ar->wmi.ops->gen_peer_assoc)
761 return -EOPNOTSUPP;
762
763 skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
764 if (IS_ERR(skb))
765 return PTR_ERR(skb);
766
767 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
768}
769
770static inline int
771ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif)
772{
773 struct ath10k *ar = arvif->ar;
774 struct sk_buff *skb;
775 int ret;
776
777 if (!ar->wmi.ops->gen_beacon_dma)
778 return -EOPNOTSUPP;
779
780 skb = ar->wmi.ops->gen_beacon_dma(arvif);
781 if (IS_ERR(skb))
782 return PTR_ERR(skb);
783
784 ret = ath10k_wmi_cmd_send_nowait(ar, skb,
785 ar->wmi.cmd->pdev_send_bcn_cmdid);
786 if (ret) {
787 dev_kfree_skb(skb);
788 return ret;
789 }
790
791 return 0;
792}
793
794static inline int
795ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
Michal Kazior5e752e42015-01-19 09:53:41 +0100796 const struct wmi_wmm_params_all_arg *arg)
Michal Kaziord7579d12014-12-03 10:10:54 +0200797{
798 struct sk_buff *skb;
799
800 if (!ar->wmi.ops->gen_pdev_set_wmm)
801 return -EOPNOTSUPP;
802
803 skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
804 if (IS_ERR(skb))
805 return PTR_ERR(skb);
806
807 return ath10k_wmi_cmd_send(ar, skb,
808 ar->wmi.cmd->pdev_set_wmm_params_cmdid);
809}
810
811static inline int
812ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
813{
814 struct sk_buff *skb;
815
816 if (!ar->wmi.ops->gen_request_stats)
817 return -EOPNOTSUPP;
818
819 skb = ar->wmi.ops->gen_request_stats(ar, stats_id);
820 if (IS_ERR(skb))
821 return PTR_ERR(skb);
822
823 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
824}
825
826static inline int
827ath10k_wmi_force_fw_hang(struct ath10k *ar,
828 enum wmi_force_fw_hang_type type, u32 delay_ms)
829{
830 struct sk_buff *skb;
831
832 if (!ar->wmi.ops->gen_force_fw_hang)
833 return -EOPNOTSUPP;
834
835 skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
836 if (IS_ERR(skb))
837 return PTR_ERR(skb);
838
839 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
840}
841
842static inline int
843ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable)
844{
845 struct sk_buff *skb;
846
847 if (!ar->wmi.ops->gen_dbglog_cfg)
848 return -EOPNOTSUPP;
849
850 skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable);
851 if (IS_ERR(skb))
852 return PTR_ERR(skb);
853
854 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
855}
856
857static inline int
858ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
859{
860 struct sk_buff *skb;
861
862 if (!ar->wmi.ops->gen_pktlog_enable)
863 return -EOPNOTSUPP;
864
865 skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
866 if (IS_ERR(skb))
867 return PTR_ERR(skb);
868
869 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
870}
871
872static inline int
873ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
874{
875 struct sk_buff *skb;
876
877 if (!ar->wmi.ops->gen_pktlog_disable)
878 return -EOPNOTSUPP;
879
880 skb = ar->wmi.ops->gen_pktlog_disable(ar);
881 if (IS_ERR(skb))
882 return PTR_ERR(skb);
883
884 return ath10k_wmi_cmd_send(ar, skb,
885 ar->wmi.cmd->pdev_pktlog_disable_cmdid);
886}
887
Rajkumar Manoharanffdd7382014-12-17 12:21:40 +0200888static inline int
889ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
890 u32 next_offset, u32 enabled)
891{
892 struct sk_buff *skb;
893
894 if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
895 return -EOPNOTSUPP;
896
897 skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
898 next_offset, enabled);
899 if (IS_ERR(skb))
900 return PTR_ERR(skb);
901
902 return ath10k_wmi_cmd_send(ar, skb,
903 ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
904}
905
Rajkumar Manoharana57a6a22014-12-17 12:22:17 +0200906static inline int
907ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
908{
909 struct sk_buff *skb;
910
911 if (!ar->wmi.ops->gen_pdev_get_temperature)
912 return -EOPNOTSUPP;
913
914 skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
915 if (IS_ERR(skb))
916 return PTR_ERR(skb);
917
918 return ath10k_wmi_cmd_send(ar, skb,
919 ar->wmi.cmd->pdev_get_temperature_cmdid);
920}
921
Rajkumar Manoharandc8ab272015-01-12 14:07:25 +0200922static inline int
923ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
924{
925 struct sk_buff *skb;
926
927 if (!ar->wmi.ops->gen_addba_clear_resp)
928 return -EOPNOTSUPP;
929
930 skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
931 if (IS_ERR(skb))
932 return PTR_ERR(skb);
933
934 return ath10k_wmi_cmd_send(ar, skb,
935 ar->wmi.cmd->addba_clear_resp_cmdid);
936}
937
Rajkumar Manoharan65c08932015-01-12 14:07:26 +0200938static inline int
939ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
940 u32 tid, u32 buf_size)
941{
942 struct sk_buff *skb;
943
944 if (!ar->wmi.ops->gen_addba_send)
945 return -EOPNOTSUPP;
946
947 skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
948 if (IS_ERR(skb))
949 return PTR_ERR(skb);
950
951 return ath10k_wmi_cmd_send(ar, skb,
952 ar->wmi.cmd->addba_send_cmdid);
953}
954
Rajkumar Manoharan11597412015-01-12 14:07:26 +0200955static inline int
956ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
957 u32 tid, u32 status)
958{
959 struct sk_buff *skb;
960
961 if (!ar->wmi.ops->gen_addba_set_resp)
962 return -EOPNOTSUPP;
963
964 skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
965 if (IS_ERR(skb))
966 return PTR_ERR(skb);
967
968 return ath10k_wmi_cmd_send(ar, skb,
969 ar->wmi.cmd->addba_set_resp_cmdid);
970}
971
Rajkumar Manoharan50abef82015-01-12 14:07:26 +0200972static inline int
973ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
974 u32 tid, u32 initiator, u32 reason)
975{
976 struct sk_buff *skb;
977
978 if (!ar->wmi.ops->gen_delba_send)
979 return -EOPNOTSUPP;
980
981 skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
982 reason);
983 if (IS_ERR(skb))
984 return PTR_ERR(skb);
985
986 return ath10k_wmi_cmd_send(ar, skb,
987 ar->wmi.cmd->delba_send_cmdid);
988}
989
Michal Kaziorbe9ce9d2015-01-13 16:30:11 +0200990static inline int
991ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
992 struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
993 void *prb_ies, size_t prb_ies_len)
994{
995 struct sk_buff *skb;
996
997 if (!ar->wmi.ops->gen_bcn_tmpl)
998 return -EOPNOTSUPP;
999
1000 skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
1001 prb_caps, prb_erp, prb_ies,
1002 prb_ies_len);
1003 if (IS_ERR(skb))
1004 return PTR_ERR(skb);
1005
1006 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
1007}
1008
Michal Kazior4c4955f2015-01-13 16:30:11 +02001009static inline int
1010ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
1011{
1012 struct sk_buff *skb;
1013
1014 if (!ar->wmi.ops->gen_prb_tmpl)
1015 return -EOPNOTSUPP;
1016
1017 skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
1018 if (IS_ERR(skb))
1019 return PTR_ERR(skb);
1020
1021 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
1022}
1023
Michal Kazior369242b4e2015-01-13 16:30:11 +02001024static inline int
1025ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
1026{
1027 struct sk_buff *skb;
1028
1029 if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
1030 return -EOPNOTSUPP;
1031
1032 skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
1033 if (IS_ERR(skb))
1034 return PTR_ERR(skb);
1035
1036 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
1037}
1038
Janusz Dziedzic6e8b1882015-01-28 09:57:39 +02001039static inline int
1040ath10k_wmi_sta_keepalive(struct ath10k *ar,
1041 const struct wmi_sta_keepalive_arg *arg)
1042{
1043 struct sk_buff *skb;
1044 u32 cmd_id;
1045
1046 if (!ar->wmi.ops->gen_sta_keepalive)
1047 return -EOPNOTSUPP;
1048
1049 skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
1050 if (IS_ERR(skb))
1051 return PTR_ERR(skb);
1052
1053 cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
1054 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1055}
1056
Michal Kaziord7579d12014-12-03 10:10:54 +02001057#endif