/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _HIF_H_
#define _HIF_H_

#include <linux/kernel.h>
#include "core.h"

struct ath10k_hif_cb {
        int (*tx_completion)(struct ath10k *ar,
                             struct sk_buff *wbuf,
                             unsigned transfer_id);
        int (*rx_completion)(struct ath10k *ar,
                             struct sk_buff *wbuf,
                             u8 pipe_id);
};

struct ath10k_hif_ops {
        /* Send the head of a buffer to HIF for transmission to the target. */
        int (*send_head)(struct ath10k *ar, u8 pipe_id,
                         unsigned int transfer_id,
                         unsigned int nbytes,
                         struct sk_buff *buf);

        /*
         * API to handle HIF-specific BMI message exchanges. This API is
         * synchronous and may only be called from a context that can
         * block (sleep).
         */
        int (*exchange_bmi_msg)(struct ath10k *ar,
                                void *request, u32 request_len,
                                void *response, u32 *response_len);

        /* Post-BMI phase, after FW is loaded. Starts regular operation. */
        int (*start)(struct ath10k *ar);

        /* Clean up what start() did. This does not revert to the BMI phase.
         * If that is desired, call power_down() and then power_up(). */
        void (*stop)(struct ath10k *ar);

        int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id,
                                   u8 *ul_pipe, u8 *dl_pipe,
                                   int *ul_is_polled, int *dl_is_polled);

        void (*get_default_pipe)(struct ath10k *ar, u8 *ul_pipe, u8 *dl_pipe);

        /*
         * Check if prior sends have completed.
         *
         * Check whether the pipe in question has any completed sends that
         * have not yet been processed. This is only relevant for HIF pipes
         * that are configured to be polled rather than interrupt-driven.
         */
        void (*send_complete_check)(struct ath10k *ar, u8 pipe_id, int force);

        void (*set_callbacks)(struct ath10k *ar,
                              struct ath10k_hif_cb *callbacks);

        u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id);

        /* Power up the device and enter BMI transfer mode for FW download. */
        int (*power_up)(struct ath10k *ar);

        /* Power down the device and free up resources. stop() must be called
         * before this if start() was called earlier. */
        void (*power_down)(struct ath10k *ar);

        int (*suspend)(struct ath10k *ar);
        int (*resume)(struct ath10k *ar);
};
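
/*
 * Note (illustrative, not part of the original header): each bus backend
 * fills in one of these op tables and points ar->hif.ops at it; the PCI
 * glue (pci.c) does this with its ath10k_pci_* callbacks. A hypothetical
 * backend would look roughly like:
 *
 *      static const struct ath10k_hif_ops ath10k_examplebus_hif_ops = {
 *              .send_head              = ath10k_examplebus_send_head,
 *              .exchange_bmi_msg       = ath10k_examplebus_exchange_bmi_msg,
 *              .start                  = ath10k_examplebus_start,
 *              .stop                   = ath10k_examplebus_stop,
 *              .map_service_to_pipe    = ath10k_examplebus_map_service_to_pipe,
 *              .get_default_pipe       = ath10k_examplebus_get_default_pipe,
 *              .send_complete_check    = ath10k_examplebus_send_complete_check,
 *              .set_callbacks          = ath10k_examplebus_set_callbacks,
 *              .get_free_queue_number  = ath10k_examplebus_get_free_queue_number,
 *              .power_up               = ath10k_examplebus_power_up,
 *              .power_down             = ath10k_examplebus_power_down,
 *      };
 *
 * .suspend and .resume may be left NULL; the inline wrappers below then
 * report -EOPNOTSUPP.
 */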

static inline int ath10k_hif_send_head(struct ath10k *ar, u8 pipe_id,
                                       unsigned int transfer_id,
                                       unsigned int nbytes,
                                       struct sk_buff *buf)
{
        return ar->hif.ops->send_head(ar, pipe_id, transfer_id, nbytes, buf);
}
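
/*
 * Illustrative sketch, not part of the original header: a caller (normally
 * the HTC layer) hands a fully prepared sk_buff to the HIF. The transfer id
 * used here and the function itself are hypothetical.
 */
static inline int ath10k_hif_example_send(struct ath10k *ar, u8 pipe_id,
                                          struct sk_buff *skb)
{
        /* transfer id 0 is a placeholder value for this sketch */
        return ath10k_hif_send_head(ar, pipe_id, 0, skb->len, skb);
}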

static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar,
                                              void *request, u32 request_len,
                                              void *response, u32 *response_len)
{
        return ar->hif.ops->exchange_bmi_msg(ar, request, request_len,
                                             response, response_len);
}
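
/*
 * Illustrative sketch, not part of the original header: BMI exchanges are
 * synchronous, so they may only be issued from a context that can sleep.
 * The zeroed request/response used here is a placeholder, not a real BMI
 * command, and the function itself is hypothetical.
 */
static inline int ath10k_hif_example_bmi_xfer(struct ath10k *ar)
{
        u32 req = 0;
        u32 resp = 0;
        u32 resp_len = sizeof(resp);

        might_sleep(); /* underline the sleepable-context requirement */

        return ath10k_hif_exchange_bmi_msg(ar, &req, sizeof(req),
                                           &resp, &resp_len);
}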

static inline int ath10k_hif_start(struct ath10k *ar)
{
        return ar->hif.ops->start(ar);
}

static inline void ath10k_hif_stop(struct ath10k *ar)
{
        ar->hif.ops->stop(ar);
}

static inline int ath10k_hif_map_service_to_pipe(struct ath10k *ar,
                                                 u16 service_id,
                                                 u8 *ul_pipe, u8 *dl_pipe,
                                                 int *ul_is_polled,
                                                 int *dl_is_polled)
{
        return ar->hif.ops->map_service_to_pipe(ar, service_id,
                                                ul_pipe, dl_pipe,
                                                ul_is_polled, dl_is_polled);
}

static inline void ath10k_hif_get_default_pipe(struct ath10k *ar,
                                               u8 *ul_pipe, u8 *dl_pipe)
{
        ar->hif.ops->get_default_pipe(ar, ul_pipe, dl_pipe);
}

static inline void ath10k_hif_send_complete_check(struct ath10k *ar,
                                                  u8 pipe_id, int force)
{
        ar->hif.ops->send_complete_check(ar, pipe_id, force);
}

static inline void ath10k_hif_set_callbacks(struct ath10k *ar,
                                            struct ath10k_hif_cb *callbacks)
{
        ar->hif.ops->set_callbacks(ar, callbacks);
}

static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,
                                                   u8 pipe_id)
{
        return ar->hif.ops->get_free_queue_number(ar, pipe_id);
}

static inline int ath10k_hif_power_up(struct ath10k *ar)
{
        return ar->hif.ops->power_up(ar);
}

static inline void ath10k_hif_power_down(struct ath10k *ar)
{
        ar->hif.ops->power_down(ar);
}

static inline int ath10k_hif_suspend(struct ath10k *ar)
{
        if (!ar->hif.ops->suspend)
                return -EOPNOTSUPP;

        return ar->hif.ops->suspend(ar);
}

static inline int ath10k_hif_resume(struct ath10k *ar)
{
        if (!ar->hif.ops->resume)
                return -EOPNOTSUPP;

        return ar->hif.ops->resume(ar);
}
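
/*
 * Illustrative sketch, not part of the original header: the rough order in
 * which the core driver is expected to drive these hooks. Firmware download
 * over BMI happens between power_up() and start(); teardown is stop()
 * followed by power_down(). Error handling is minimal and the function name
 * is hypothetical.
 */
static inline int ath10k_hif_example_boot(struct ath10k *ar)
{
        int ret;

        /* power the device up; it comes up in BMI mode for FW download */
        ret = ath10k_hif_power_up(ar);
        if (ret)
                return ret;

        /* ... BMI message exchanges / firmware download would go here ... */

        /* leave BMI and begin regular operation */
        ret = ath10k_hif_start(ar);
        if (ret) {
                ath10k_hif_power_down(ar);
                return ret;
        }

        return 0;
}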

#endif /* _HIF_H_ */