blob: e8f49de183699134958d52c1b8fad2c45eacbb6e [file] [log] [blame]
Michal Kaziord7579d12014-12-03 10:10:54 +02001/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#ifndef _WMI_OPS_H_
19#define _WMI_OPS_H_
20
21struct ath10k;
22struct sk_buff;
23
/*
 * Per-firmware-ABI WMI operation table.  Each supported WMI interface
 * revision supplies its own implementation; drivers call through the
 * ath10k_wmi_*() inline wrappers below, which return -EOPNOTSUPP when
 * an op pointer is NULL.
 */
struct wmi_ops {
	/* Dispatch a received WMI event skb. */
	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
	/* Map a firmware service bitmap (__le32 words) onto the driver's
	 * unsigned-long service bitmap. */
	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);

	/* pull_*(): parse an event skb into an ABI-neutral *_ev_arg
	 * structure; the int result is propagated unchanged by the
	 * ath10k_wmi_pull_*() wrappers. */
	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_scan_ev_arg *arg);
	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_mgmt_rx_ev_arg *arg);
	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_ch_info_ev_arg *arg);
	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_vdev_start_ev_arg *arg);
	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_peer_kick_ev_arg *arg);
	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_swba_ev_arg *arg);
	int (*pull_phyerr)(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_phyerr_ev_arg *arg);
	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_svc_rdy_ev_arg *arg);
	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_rdy_ev_arg *arg);
	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
			     struct ath10k_fw_stats *stats);

	/* gen_*(): allocate and fill an ABI-specific command skb, or
	 * return an ERR_PTR on failure.  The wrappers below hand the skb
	 * to ath10k_wmi_cmd_send() with the matching cmdid. */
	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
					   u16 rd5g, u16 ctl2g, u16 ctl5g,
					   enum wmi_dfs_region dfs_reg);
	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
					      u32 value);
	struct sk_buff *(*gen_init)(struct ath10k *ar);
	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
					  const struct wmi_start_scan_arg *arg);
	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
					 const struct wmi_stop_scan_arg *arg);
	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
					   enum wmi_vdev_type type,
					   enum wmi_vdev_subtype subtype,
					   const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
					  const struct wmi_vdev_start_request_arg *arg,
					  bool restart);
	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
				       const u8 *bssid);
	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
					      u32 param_id, u32 param_value);
	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
						const struct wmi_vdev_install_key_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
						  const struct wmi_vdev_spectral_conf_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
						    u32 trigger, u32 enable);
	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
					  const u8 peer_addr[ETH_ALEN],
					  u32 tid_bitmap);
	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
					      const u8 *peer_addr,
					      enum wmi_peer_param param_id,
					      u32 param_value);
	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
					  const struct wmi_peer_assoc_complete_arg *arg);
	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_ps_mode psmode);
	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_powersave_param param_id,
					  u32 value);
	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
					 const u8 *mac,
					 enum wmi_ap_ps_peer_param param_id,
					 u32 value);
	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
					      const struct wmi_scan_chan_list_arg *arg);
	/* Takes the vif (not ar) so the ABI can reach the beacon DMA state. */
	struct sk_buff *(*gen_beacon_dma)(struct ath10k_vif *arvif);
	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
					    const struct wmi_pdev_set_wmm_params_arg *arg);
	struct sk_buff *(*gen_request_stats)(struct ath10k *ar,
					     enum wmi_stats_id stats_id);
	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
					     enum wmi_force_fw_hang_type type,
					     u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
						   u32 period, u32 duration,
						   u32 next_offset,
						   u32 enabled);
	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
						const u8 *mac);
	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 buf_size);
	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
					      const u8 *mac, u32 tid,
					      u32 status);
};
130
131int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
132
/* Hand a received WMI event skb to the ABI's rx handler.  A missing rx
 * op is a driver bug, hence the WARN_ON_ONCE (all ABIs must provide it). */
static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
	if (WARN_ON_ONCE(!ar->wmi.ops->rx))
		return -EOPNOTSUPP;

	ar->wmi.ops->rx(ar, skb);
	return 0;
}

/* Map a firmware service bitmap into the driver bitmap via the ABI op. */
static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
		   size_t len)
{
	if (!ar->wmi.ops->map_svc)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc(in, out, len);
	return 0;
}
153
/* Each ath10k_wmi_pull_*() wrapper below parses one WMI event type into
 * its ABI-neutral arg struct, returning -EOPNOTSUPP when the current
 * firmware ABI does not implement the parser. */

/* Parse a scan event. */
static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_scan_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_scan)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_scan(ar, skb, arg);
}

/* Parse a management-frame rx event. */
static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_mgmt_rx_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_rx)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}

/* Parse a channel-info event. */
static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_ch_info_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_ch_info)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}

/* Parse a vdev start-response event. */
static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_vdev_start_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_vdev_start)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}

/* Parse a peer sta-kickout event. */
static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_peer_kick_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_peer_kick)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}

/* Parse a software beacon alert (swba) event. */
static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_swba_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_swba)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_swba(ar, skb, arg);
}

/* Parse a PHY-error event. */
static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, struct sk_buff *skb,
		       struct wmi_phyerr_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr(ar, skb, arg);
}

/* Parse a service-ready event. */
static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_svc_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}

/* Parse a ready event. */
static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
		    struct wmi_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_rdy(ar, skb, arg);
}

/* Parse a firmware stats event into @stats. */
static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
			 struct ath10k_fw_stats *stats)
{
	if (!ar->wmi.ops->pull_fw_stats)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}
253
/* Transmit a management frame over WMI and immediately report it to
 * mac80211 as ACKed.  @msdu ownership: on success it is handed to
 * ieee80211_tx_status_irqsafe(); the command skb built from it is
 * consumed by ath10k_wmi_cmd_send(). */
static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;

	/* FIXME There's no ACK event for Management Tx. This probably
	 * shouldn't be called here either. */
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	return 0;
}
279
/* Program the regulatory domain (2G/5G rd and conformance-test limits,
 * plus DFS region) into the firmware. */
static inline int
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
			      u16 ctl2g, u16 ctl5g,
			      enum wmi_dfs_region dfs_reg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_rd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
					   dfs_reg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
}

/* Ask the firmware to suspend the target with the given option. */
static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_suspend)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}

/* Resume a previously suspended target. */
static inline int
ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_resume)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_resume(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}

/* Set a single pdev-level firmware parameter. */
static inline int
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}

/* Send the WMI init command that completes firmware bring-up. */
static inline int
ath10k_wmi_cmd_init(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_init)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_init(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}
358
/* Start a hardware scan described by @arg. */
static inline int
ath10k_wmi_start_scan(struct ath10k *ar,
		      const struct wmi_start_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_start_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_start_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

/* Abort/stop a scan as described by @arg. */
static inline int
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_stop_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}
389
/* Create a firmware vdev with the given id, type/subtype and MAC. */
static inline int
ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
		       enum wmi_vdev_type type,
		       enum wmi_vdev_subtype subtype,
		       const u8 macaddr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}

/* Delete the firmware vdev @vdev_id. */
static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}

/* Start a vdev (restart=false variant of gen_vdev_start). */
static inline int
ath10k_wmi_vdev_start(struct ath10k *ar,
		      const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_start_request_cmdid);
}

/* Restart a vdev (same gen op as start, restart=true, different cmdid). */
static inline int
ath10k_wmi_vdev_restart(struct ath10k *ar,
			const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_restart_request_cmdid);
}

/* Stop the vdev @vdev_id. */
static inline int
ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_stop)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}

/* Bring a vdev up with the given association id and BSSID. */
static inline int
ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_up)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}

/* Bring the vdev @vdev_id down. */
static inline int
ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_down)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}

/* Set a per-vdev firmware parameter. */
static inline int
ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
			  u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}
518
/* Install an encryption key on a vdev as described by @arg. */
static inline int
ath10k_wmi_vdev_install_key(struct ath10k *ar,
			    const struct wmi_vdev_install_key_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_install_key)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_install_key_cmdid);
}
535
536static inline int
537ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
538 const struct wmi_vdev_spectral_conf_arg *arg)
539{
540 struct sk_buff *skb;
541 u32 cmd_id;
542
543 skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
544 if (IS_ERR(skb))
545 return PTR_ERR(skb);
546
547 cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
548 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
549}
550
551static inline int
552ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
553 u32 enable)
554{
555 struct sk_buff *skb;
556 u32 cmd_id;
557
558 skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
559 enable);
560 if (IS_ERR(skb))
561 return PTR_ERR(skb);
562
563 cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
564 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
565}
566
/* Create a firmware peer entry for @peer_addr on vdev @vdev_id. */
static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}

/* Delete the firmware peer entry for @peer_addr on vdev @vdev_id. */
static inline int
ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}

/* Flush the peer's queued TIDs selected by @tid_bitmap. */
static inline int
ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
		      const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_flush)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}

/* Set a per-peer firmware parameter. */
static inline int
ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
			  enum wmi_peer_param param_id, u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}
631
/* Set the station powersave mode for a vdev. */
static inline int
ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
		      enum wmi_sta_ps_mode psmode)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_psmode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_mode_cmdid);
}

/* Set a station powersave parameter for a vdev. */
static inline int
ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
			    enum wmi_sta_powersave_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_sta_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_param_cmdid);
}

/* Set an AP-side per-peer powersave parameter. */
static inline int
ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			   enum wmi_ap_ps_peer_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_ap_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ap_ps_peer_param_cmdid);
}
682
/* Push the scan channel list described by @arg to the firmware. */
static inline int
ath10k_wmi_scan_chan_list(struct ath10k *ar,
			  const struct wmi_scan_chan_list_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_scan_chan_list)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}

/* Send the peer association-complete parameters to the firmware. */
static inline int
ath10k_wmi_peer_assoc(struct ath10k *ar,
		      const struct wmi_peer_assoc_complete_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_assoc)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}
714
/* Send a beacon command without waiting for command credits.  Unlike the
 * other wrappers this uses ath10k_wmi_cmd_send_nowait() and must free
 * the command skb itself if the send fails (the blocking send path
 * consumes the skb; the nowait path does not on error). */
static inline int
ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_beacon_dma)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_beacon_dma(arvif);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
					 ar->wmi.cmd->pdev_send_bcn_cmdid);
	if (ret) {
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}
738
/* Program per-AC WMM parameters into the firmware. */
static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
			       const struct wmi_pdev_set_wmm_params_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_wmm)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}

/* Request a firmware statistics report for @stats_id. */
static inline int
ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_request_stats)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_request_stats(ar, stats_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}

/* Deliberately crash/hang the firmware (test facility). */
static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
			 enum wmi_force_fw_hang_type type, u32 delay_ms)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_force_fw_hang)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}

/* Configure firmware debug-log module enable bits. */
static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_dbglog_cfg)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}

/* Enable packet logging with the given filter bits. */
static inline int
ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}

/* Disable packet logging. */
static inline int
ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_disable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_disable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}
832
/* Configure quiet-period scheduling (period/duration/offset, enable flag). */
static inline int
ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
			       u32 next_offset, u32 enabled)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
						   next_offset, enabled);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
}

/* Request a chip temperature reading (result arrives as a WMI event). */
static inline int
ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_temperature)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_temperature_cmdid);
}
866
/* Clear the ADDBA response state for peer @mac on vdev @vdev_id. */
static inline int
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_clear_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_clear_resp_cmdid);
}

/* Send an ADDBA request for @tid with the given reorder buffer size. */
static inline int
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 buf_size)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_send_cmdid);
}

/* Set the ADDBA response @status for @tid of peer @mac. */
static inline int
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 status)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_set_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_set_resp_cmdid);
}
916
Michal Kaziord7579d12014-12-03 10:10:54 +0200917#endif