/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
17
#ifndef _WMI_OPS_H_
#define _WMI_OPS_H_

/* Forward declarations only -- this header deals purely in pointers. */
struct ath10k;
struct sk_buff;
23
/* Backend operation table for a WMI (wireless module interface)
 * implementation.  A NULL entry means the backend does not support the
 * corresponding event parser or command generator; the inline wrappers
 * below turn that into -EOPNOTSUPP.
 */
struct wmi_ops {
	/* Deliver a received WMI event skb to the backend's dispatcher. */
	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
	/* Translate the backend's service bitmap into the common one. */
	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);

	/* Event parsers: decode an event skb into a common argument
	 * structure.  Return 0 or a negative errno.
	 */
	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_scan_ev_arg *arg);
	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_mgmt_rx_ev_arg *arg);
	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_ch_info_ev_arg *arg);
	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_vdev_start_ev_arg *arg);
	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_peer_kick_ev_arg *arg);
	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_swba_ev_arg *arg);
	int (*pull_phyerr)(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_phyerr_ev_arg *arg);
	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_svc_rdy_ev_arg *arg);
	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_rdy_ev_arg *arg);
	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
			     struct ath10k_fw_stats *stats);

	/* Command generators: build and return a command skb ready for
	 * ath10k_wmi_cmd_send(), or an ERR_PTR() value on failure.
	 */
	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
					   u16 rd5g, u16 ctl2g, u16 ctl5g,
					   enum wmi_dfs_region dfs_reg);
	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
					      u32 value);
	struct sk_buff *(*gen_init)(struct ath10k *ar);
	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
					  const struct wmi_start_scan_arg *arg);
	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
					 const struct wmi_stop_scan_arg *arg);
	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
					   enum wmi_vdev_type type,
					   enum wmi_vdev_subtype subtype,
					   const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
					  const struct wmi_vdev_start_request_arg *arg,
					  bool restart);
	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
				       const u8 *bssid);
	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
					      u32 param_id, u32 param_value);
	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
						const struct wmi_vdev_install_key_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
						  const struct wmi_vdev_spectral_conf_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
						    u32 trigger, u32 enable);
	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
					  const u8 peer_addr[ETH_ALEN],
					  u32 tid_bitmap);
	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
					      const u8 *peer_addr,
					      enum wmi_peer_param param_id,
					      u32 param_value);
	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
					  const struct wmi_peer_assoc_complete_arg *arg);
	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_ps_mode psmode);
	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_powersave_param param_id,
					  u32 value);
	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
					 const u8 *mac,
					 enum wmi_ap_ps_peer_param param_id,
					 u32 value);
	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
					      const struct wmi_scan_chan_list_arg *arg);
	struct sk_buff *(*gen_beacon_dma)(struct ath10k_vif *arvif);
	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
					    const struct wmi_pdev_set_wmm_params_arg *arg);
	struct sk_buff *(*gen_request_stats)(struct ath10k *ar,
					     enum wmi_stats_id stats_id);
	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
					     enum wmi_force_fw_hang_type type,
					     u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
						   u32 period, u32 duration,
						   u32 next_offset,
						   u32 enabled);
	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
						const u8 *mac);
	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 buf_size);
	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
					      const u8 *mac, u32 tid,
					      u32 status);
	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 initiator,
					  u32 reason);
};
133
134int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
135
136static inline int
137ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
138{
139 if (WARN_ON_ONCE(!ar->wmi.ops->rx))
140 return -EOPNOTSUPP;
141
142 ar->wmi.ops->rx(ar, skb);
143 return 0;
144}
145
146static inline int
147ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
148 size_t len)
149{
150 if (!ar->wmi.ops->map_svc)
151 return -EOPNOTSUPP;
152
153 ar->wmi.ops->map_svc(in, out, len);
154 return 0;
155}
156
157static inline int
158ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
159 struct wmi_scan_ev_arg *arg)
160{
161 if (!ar->wmi.ops->pull_scan)
162 return -EOPNOTSUPP;
163
164 return ar->wmi.ops->pull_scan(ar, skb, arg);
165}
166
167static inline int
168ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
169 struct wmi_mgmt_rx_ev_arg *arg)
170{
171 if (!ar->wmi.ops->pull_mgmt_rx)
172 return -EOPNOTSUPP;
173
174 return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
175}
176
177static inline int
178ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
179 struct wmi_ch_info_ev_arg *arg)
180{
181 if (!ar->wmi.ops->pull_ch_info)
182 return -EOPNOTSUPP;
183
184 return ar->wmi.ops->pull_ch_info(ar, skb, arg);
185}
186
187static inline int
188ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
189 struct wmi_vdev_start_ev_arg *arg)
190{
191 if (!ar->wmi.ops->pull_vdev_start)
192 return -EOPNOTSUPP;
193
194 return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
195}
196
197static inline int
198ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
199 struct wmi_peer_kick_ev_arg *arg)
200{
201 if (!ar->wmi.ops->pull_peer_kick)
202 return -EOPNOTSUPP;
203
204 return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
205}
206
207static inline int
208ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
209 struct wmi_swba_ev_arg *arg)
210{
211 if (!ar->wmi.ops->pull_swba)
212 return -EOPNOTSUPP;
213
214 return ar->wmi.ops->pull_swba(ar, skb, arg);
215}
216
217static inline int
218ath10k_wmi_pull_phyerr(struct ath10k *ar, struct sk_buff *skb,
219 struct wmi_phyerr_ev_arg *arg)
220{
221 if (!ar->wmi.ops->pull_phyerr)
222 return -EOPNOTSUPP;
223
224 return ar->wmi.ops->pull_phyerr(ar, skb, arg);
225}
226
227static inline int
228ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
229 struct wmi_svc_rdy_ev_arg *arg)
230{
231 if (!ar->wmi.ops->pull_svc_rdy)
232 return -EOPNOTSUPP;
233
234 return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
235}
236
237static inline int
238ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
239 struct wmi_rdy_ev_arg *arg)
240{
241 if (!ar->wmi.ops->pull_rdy)
242 return -EOPNOTSUPP;
243
244 return ar->wmi.ops->pull_rdy(ar, skb, arg);
245}
246
247static inline int
248ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
249 struct ath10k_fw_stats *stats)
250{
251 if (!ar->wmi.ops->pull_fw_stats)
252 return -EOPNOTSUPP;
253
254 return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
255}
256
/* Transmit a management frame via the WMI management tx command.
 *
 * On successful submission the frame is reported back to mac80211 as
 * ACKed even though no real ACK indication exists -- see the FIXME.
 * Returns 0 on success or a negative errno.
 */
static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx)
		return -EOPNOTSUPP;

	/* Wrap the frame into a WMI command buffer. */
	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;

	/* FIXME There's no ACK event for Management Tx. This probably
	 * shouldn't be called here either. */
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	return 0;
}
282
283static inline int
284ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
285 u16 ctl2g, u16 ctl5g,
286 enum wmi_dfs_region dfs_reg)
287{
288 struct sk_buff *skb;
289
290 if (!ar->wmi.ops->gen_pdev_set_rd)
291 return -EOPNOTSUPP;
292
293 skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
294 dfs_reg);
295 if (IS_ERR(skb))
296 return PTR_ERR(skb);
297
298 return ath10k_wmi_cmd_send(ar, skb,
299 ar->wmi.cmd->pdev_set_regdomain_cmdid);
300}
301
302static inline int
303ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
304{
305 struct sk_buff *skb;
306
307 if (!ar->wmi.ops->gen_pdev_suspend)
308 return -EOPNOTSUPP;
309
310 skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
311 if (IS_ERR(skb))
312 return PTR_ERR(skb);
313
314 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
315}
316
317static inline int
318ath10k_wmi_pdev_resume_target(struct ath10k *ar)
319{
320 struct sk_buff *skb;
321
322 if (!ar->wmi.ops->gen_pdev_resume)
323 return -EOPNOTSUPP;
324
325 skb = ar->wmi.ops->gen_pdev_resume(ar);
326 if (IS_ERR(skb))
327 return PTR_ERR(skb);
328
329 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
330}
331
332static inline int
333ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
334{
335 struct sk_buff *skb;
336
337 if (!ar->wmi.ops->gen_pdev_set_param)
338 return -EOPNOTSUPP;
339
340 skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
341 if (IS_ERR(skb))
342 return PTR_ERR(skb);
343
344 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
345}
346
347static inline int
348ath10k_wmi_cmd_init(struct ath10k *ar)
349{
350 struct sk_buff *skb;
351
352 if (!ar->wmi.ops->gen_init)
353 return -EOPNOTSUPP;
354
355 skb = ar->wmi.ops->gen_init(ar);
356 if (IS_ERR(skb))
357 return PTR_ERR(skb);
358
359 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
360}
361
362static inline int
363ath10k_wmi_start_scan(struct ath10k *ar,
364 const struct wmi_start_scan_arg *arg)
365{
366 struct sk_buff *skb;
367
368 if (!ar->wmi.ops->gen_start_scan)
369 return -EOPNOTSUPP;
370
371 skb = ar->wmi.ops->gen_start_scan(ar, arg);
372 if (IS_ERR(skb))
373 return PTR_ERR(skb);
374
375 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
376}
377
378static inline int
379ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
380{
381 struct sk_buff *skb;
382
383 if (!ar->wmi.ops->gen_stop_scan)
384 return -EOPNOTSUPP;
385
386 skb = ar->wmi.ops->gen_stop_scan(ar, arg);
387 if (IS_ERR(skb))
388 return PTR_ERR(skb);
389
390 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
391}
392
393static inline int
394ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
395 enum wmi_vdev_type type,
396 enum wmi_vdev_subtype subtype,
397 const u8 macaddr[ETH_ALEN])
398{
399 struct sk_buff *skb;
400
401 if (!ar->wmi.ops->gen_vdev_create)
402 return -EOPNOTSUPP;
403
404 skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
405 if (IS_ERR(skb))
406 return PTR_ERR(skb);
407
408 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
409}
410
411static inline int
412ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
413{
414 struct sk_buff *skb;
415
416 if (!ar->wmi.ops->gen_vdev_delete)
417 return -EOPNOTSUPP;
418
419 skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
420 if (IS_ERR(skb))
421 return PTR_ERR(skb);
422
423 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
424}
425
426static inline int
427ath10k_wmi_vdev_start(struct ath10k *ar,
428 const struct wmi_vdev_start_request_arg *arg)
429{
430 struct sk_buff *skb;
431
432 if (!ar->wmi.ops->gen_vdev_start)
433 return -EOPNOTSUPP;
434
435 skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
436 if (IS_ERR(skb))
437 return PTR_ERR(skb);
438
439 return ath10k_wmi_cmd_send(ar, skb,
440 ar->wmi.cmd->vdev_start_request_cmdid);
441}
442
443static inline int
444ath10k_wmi_vdev_restart(struct ath10k *ar,
445 const struct wmi_vdev_start_request_arg *arg)
446{
447 struct sk_buff *skb;
448
449 if (!ar->wmi.ops->gen_vdev_start)
450 return -EOPNOTSUPP;
451
452 skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
453 if (IS_ERR(skb))
454 return PTR_ERR(skb);
455
456 return ath10k_wmi_cmd_send(ar, skb,
457 ar->wmi.cmd->vdev_restart_request_cmdid);
458}
459
460static inline int
461ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
462{
463 struct sk_buff *skb;
464
465 if (!ar->wmi.ops->gen_vdev_stop)
466 return -EOPNOTSUPP;
467
468 skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
469 if (IS_ERR(skb))
470 return PTR_ERR(skb);
471
472 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
473}
474
475static inline int
476ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
477{
478 struct sk_buff *skb;
479
480 if (!ar->wmi.ops->gen_vdev_up)
481 return -EOPNOTSUPP;
482
483 skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
484 if (IS_ERR(skb))
485 return PTR_ERR(skb);
486
487 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
488}
489
490static inline int
491ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
492{
493 struct sk_buff *skb;
494
495 if (!ar->wmi.ops->gen_vdev_down)
496 return -EOPNOTSUPP;
497
498 skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
499 if (IS_ERR(skb))
500 return PTR_ERR(skb);
501
502 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
503}
504
505static inline int
506ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
507 u32 param_value)
508{
509 struct sk_buff *skb;
510
511 if (!ar->wmi.ops->gen_vdev_set_param)
512 return -EOPNOTSUPP;
513
514 skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
515 param_value);
516 if (IS_ERR(skb))
517 return PTR_ERR(skb);
518
519 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
520}
521
522static inline int
523ath10k_wmi_vdev_install_key(struct ath10k *ar,
524 const struct wmi_vdev_install_key_arg *arg)
525{
526 struct sk_buff *skb;
527
528 if (!ar->wmi.ops->gen_vdev_install_key)
529 return -EOPNOTSUPP;
530
531 skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
532 if (IS_ERR(skb))
533 return PTR_ERR(skb);
534
535 return ath10k_wmi_cmd_send(ar, skb,
536 ar->wmi.cmd->vdev_install_key_cmdid);
537}
538
539static inline int
540ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
541 const struct wmi_vdev_spectral_conf_arg *arg)
542{
543 struct sk_buff *skb;
544 u32 cmd_id;
545
546 skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
547 if (IS_ERR(skb))
548 return PTR_ERR(skb);
549
550 cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
551 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
552}
553
554static inline int
555ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
556 u32 enable)
557{
558 struct sk_buff *skb;
559 u32 cmd_id;
560
561 skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
562 enable);
563 if (IS_ERR(skb))
564 return PTR_ERR(skb);
565
566 cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
567 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
568}
569
570static inline int
571ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
572 const u8 peer_addr[ETH_ALEN])
573{
574 struct sk_buff *skb;
575
576 if (!ar->wmi.ops->gen_peer_create)
577 return -EOPNOTSUPP;
578
579 skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr);
580 if (IS_ERR(skb))
581 return PTR_ERR(skb);
582
583 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
584}
585
586static inline int
587ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
588 const u8 peer_addr[ETH_ALEN])
589{
590 struct sk_buff *skb;
591
592 if (!ar->wmi.ops->gen_peer_delete)
593 return -EOPNOTSUPP;
594
595 skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
596 if (IS_ERR(skb))
597 return PTR_ERR(skb);
598
599 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
600}
601
602static inline int
603ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
604 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
605{
606 struct sk_buff *skb;
607
608 if (!ar->wmi.ops->gen_peer_flush)
609 return -EOPNOTSUPP;
610
611 skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
612 if (IS_ERR(skb))
613 return PTR_ERR(skb);
614
615 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
616}
617
618static inline int
619ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
620 enum wmi_peer_param param_id, u32 param_value)
621{
622 struct sk_buff *skb;
623
624 if (!ar->wmi.ops->gen_peer_set_param)
625 return -EOPNOTSUPP;
626
627 skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
628 param_value);
629 if (IS_ERR(skb))
630 return PTR_ERR(skb);
631
632 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
633}
634
635static inline int
636ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
637 enum wmi_sta_ps_mode psmode)
638{
639 struct sk_buff *skb;
640
641 if (!ar->wmi.ops->gen_set_psmode)
642 return -EOPNOTSUPP;
643
644 skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
645 if (IS_ERR(skb))
646 return PTR_ERR(skb);
647
648 return ath10k_wmi_cmd_send(ar, skb,
649 ar->wmi.cmd->sta_powersave_mode_cmdid);
650}
651
652static inline int
653ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
654 enum wmi_sta_powersave_param param_id, u32 value)
655{
656 struct sk_buff *skb;
657
658 if (!ar->wmi.ops->gen_set_sta_ps)
659 return -EOPNOTSUPP;
660
661 skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
662 if (IS_ERR(skb))
663 return PTR_ERR(skb);
664
665 return ath10k_wmi_cmd_send(ar, skb,
666 ar->wmi.cmd->sta_powersave_param_cmdid);
667}
668
669static inline int
670ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
671 enum wmi_ap_ps_peer_param param_id, u32 value)
672{
673 struct sk_buff *skb;
674
675 if (!ar->wmi.ops->gen_set_ap_ps)
676 return -EOPNOTSUPP;
677
678 skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
679 if (IS_ERR(skb))
680 return PTR_ERR(skb);
681
682 return ath10k_wmi_cmd_send(ar, skb,
683 ar->wmi.cmd->ap_ps_peer_param_cmdid);
684}
685
686static inline int
687ath10k_wmi_scan_chan_list(struct ath10k *ar,
688 const struct wmi_scan_chan_list_arg *arg)
689{
690 struct sk_buff *skb;
691
692 if (!ar->wmi.ops->gen_scan_chan_list)
693 return -EOPNOTSUPP;
694
695 skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
696 if (IS_ERR(skb))
697 return PTR_ERR(skb);
698
699 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
700}
701
702static inline int
703ath10k_wmi_peer_assoc(struct ath10k *ar,
704 const struct wmi_peer_assoc_complete_arg *arg)
705{
706 struct sk_buff *skb;
707
708 if (!ar->wmi.ops->gen_peer_assoc)
709 return -EOPNOTSUPP;
710
711 skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
712 if (IS_ERR(skb))
713 return PTR_ERR(skb);
714
715 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
716}
717
718static inline int
719ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif)
720{
721 struct ath10k *ar = arvif->ar;
722 struct sk_buff *skb;
723 int ret;
724
725 if (!ar->wmi.ops->gen_beacon_dma)
726 return -EOPNOTSUPP;
727
728 skb = ar->wmi.ops->gen_beacon_dma(arvif);
729 if (IS_ERR(skb))
730 return PTR_ERR(skb);
731
732 ret = ath10k_wmi_cmd_send_nowait(ar, skb,
733 ar->wmi.cmd->pdev_send_bcn_cmdid);
734 if (ret) {
735 dev_kfree_skb(skb);
736 return ret;
737 }
738
739 return 0;
740}
741
742static inline int
743ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
744 const struct wmi_pdev_set_wmm_params_arg *arg)
745{
746 struct sk_buff *skb;
747
748 if (!ar->wmi.ops->gen_pdev_set_wmm)
749 return -EOPNOTSUPP;
750
751 skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
752 if (IS_ERR(skb))
753 return PTR_ERR(skb);
754
755 return ath10k_wmi_cmd_send(ar, skb,
756 ar->wmi.cmd->pdev_set_wmm_params_cmdid);
757}
758
759static inline int
760ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
761{
762 struct sk_buff *skb;
763
764 if (!ar->wmi.ops->gen_request_stats)
765 return -EOPNOTSUPP;
766
767 skb = ar->wmi.ops->gen_request_stats(ar, stats_id);
768 if (IS_ERR(skb))
769 return PTR_ERR(skb);
770
771 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
772}
773
774static inline int
775ath10k_wmi_force_fw_hang(struct ath10k *ar,
776 enum wmi_force_fw_hang_type type, u32 delay_ms)
777{
778 struct sk_buff *skb;
779
780 if (!ar->wmi.ops->gen_force_fw_hang)
781 return -EOPNOTSUPP;
782
783 skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
784 if (IS_ERR(skb))
785 return PTR_ERR(skb);
786
787 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
788}
789
790static inline int
791ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable)
792{
793 struct sk_buff *skb;
794
795 if (!ar->wmi.ops->gen_dbglog_cfg)
796 return -EOPNOTSUPP;
797
798 skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable);
799 if (IS_ERR(skb))
800 return PTR_ERR(skb);
801
802 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
803}
804
805static inline int
806ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
807{
808 struct sk_buff *skb;
809
810 if (!ar->wmi.ops->gen_pktlog_enable)
811 return -EOPNOTSUPP;
812
813 skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
814 if (IS_ERR(skb))
815 return PTR_ERR(skb);
816
817 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
818}
819
820static inline int
821ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
822{
823 struct sk_buff *skb;
824
825 if (!ar->wmi.ops->gen_pktlog_disable)
826 return -EOPNOTSUPP;
827
828 skb = ar->wmi.ops->gen_pktlog_disable(ar);
829 if (IS_ERR(skb))
830 return PTR_ERR(skb);
831
832 return ath10k_wmi_cmd_send(ar, skb,
833 ar->wmi.cmd->pdev_pktlog_disable_cmdid);
834}
835
Rajkumar Manoharanffdd7382014-12-17 12:21:40 +0200836static inline int
837ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
838 u32 next_offset, u32 enabled)
839{
840 struct sk_buff *skb;
841
842 if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
843 return -EOPNOTSUPP;
844
845 skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
846 next_offset, enabled);
847 if (IS_ERR(skb))
848 return PTR_ERR(skb);
849
850 return ath10k_wmi_cmd_send(ar, skb,
851 ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
852}
853
Rajkumar Manoharana57a6a22014-12-17 12:22:17 +0200854static inline int
855ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
856{
857 struct sk_buff *skb;
858
859 if (!ar->wmi.ops->gen_pdev_get_temperature)
860 return -EOPNOTSUPP;
861
862 skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
863 if (IS_ERR(skb))
864 return PTR_ERR(skb);
865
866 return ath10k_wmi_cmd_send(ar, skb,
867 ar->wmi.cmd->pdev_get_temperature_cmdid);
868}
869
Rajkumar Manoharandc8ab272015-01-12 14:07:25 +0200870static inline int
871ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
872{
873 struct sk_buff *skb;
874
875 if (!ar->wmi.ops->gen_addba_clear_resp)
876 return -EOPNOTSUPP;
877
878 skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
879 if (IS_ERR(skb))
880 return PTR_ERR(skb);
881
882 return ath10k_wmi_cmd_send(ar, skb,
883 ar->wmi.cmd->addba_clear_resp_cmdid);
884}
885
Rajkumar Manoharan65c08932015-01-12 14:07:26 +0200886static inline int
887ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
888 u32 tid, u32 buf_size)
889{
890 struct sk_buff *skb;
891
892 if (!ar->wmi.ops->gen_addba_send)
893 return -EOPNOTSUPP;
894
895 skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
896 if (IS_ERR(skb))
897 return PTR_ERR(skb);
898
899 return ath10k_wmi_cmd_send(ar, skb,
900 ar->wmi.cmd->addba_send_cmdid);
901}
902
Rajkumar Manoharan11597412015-01-12 14:07:26 +0200903static inline int
904ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
905 u32 tid, u32 status)
906{
907 struct sk_buff *skb;
908
909 if (!ar->wmi.ops->gen_addba_set_resp)
910 return -EOPNOTSUPP;
911
912 skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
913 if (IS_ERR(skb))
914 return PTR_ERR(skb);
915
916 return ath10k_wmi_cmd_send(ar, skb,
917 ar->wmi.cmd->addba_set_resp_cmdid);
918}
919
Rajkumar Manoharan50abef82015-01-12 14:07:26 +0200920static inline int
921ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
922 u32 tid, u32 initiator, u32 reason)
923{
924 struct sk_buff *skb;
925
926 if (!ar->wmi.ops->gen_delba_send)
927 return -EOPNOTSUPP;
928
929 skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
930 reason);
931 if (IS_ERR(skb))
932 return PTR_ERR(skb);
933
934 return ath10k_wmi_cmd_send(ar, skb,
935 ar->wmi.cmd->delba_send_cmdid);
936}
937
Michal Kaziord7579d12014-12-03 10:10:54 +0200938#endif