blob: 633594cd54fad6396722c2f45eb7ecf3d775ea00 [file] [log] [blame]
Kalle Valo5e3dd152013-06-12 20:52:10 +03001/*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#ifndef _HIF_H_
19#define _HIF_H_
20
21#include <linux/kernel.h>
22#include "core.h"
Yanbo Li077a3802014-11-25 12:24:33 +020023#include "debug.h"
Kalle Valo5e3dd152013-06-12 20:52:10 +030024
/* One element of a scatter-gather list submitted to the HIF tx_sg op. */
struct ath10k_hif_sg_item {
	u16 transfer_id;        /* opaque id echoed back on completion — presumably HTC endpoint/transfer tag; TODO confirm */
	void *transfer_context; /* NULL = tx completion callback not called */
	void *vaddr;            /* for debugging mostly */
	u32 paddr;              /* bus/DMA address of the buffer — assumed device-visible; verify against bus backends */
	u16 len;                /* buffer length in bytes */
};
32
/*
 * Bus-abstraction (Host InterFace) vtable. Each bus backend (e.g. PCI)
 * fills one of these in; core code calls it only through the
 * ath10k_hif_*() inline wrappers below. Optional members (diag_write,
 * suspend, resume, read32, write32) may be NULL — the wrappers check.
 */
struct ath10k_hif_ops {
	/* send a scatter-gather list to the target */
	int (*tx_sg)(struct ath10k *ar, u8 pipe_id,
		     struct ath10k_hif_sg_item *items, int n_items);

	/* read firmware memory through the diagnose interface */
	int (*diag_read)(struct ath10k *ar, u32 address, void *buf,
			 size_t buf_len);

	/* write firmware memory through the diagnose interface; optional */
	int (*diag_write)(struct ath10k *ar, u32 address, const void *data,
			  int nbytes);
	/*
	 * API to handle HIF-specific BMI message exchanges, this API is
	 * synchronous and only allowed to be called from a context that
	 * can block (sleep)
	 */
	int (*exchange_bmi_msg)(struct ath10k *ar,
				void *request, u32 request_len,
				void *response, u32 *response_len);

	/* Post BMI phase, after FW is loaded. Starts regular operation */
	int (*start)(struct ath10k *ar);

	/* Clean up what start() did. This does not revert to BMI phase. If
	 * desired so, call power_down() and power_up() */
	void (*stop)(struct ath10k *ar);

	/* resolve an HTC service id to its ul/dl pipe ids; also reports
	 * whether the ul pipe is polled rather than interrupt-driven */
	int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id,
				   u8 *ul_pipe, u8 *dl_pipe,
				   int *ul_is_polled);

	/* pipes used for control traffic before service mapping is known */
	void (*get_default_pipe)(struct ath10k *ar, u8 *ul_pipe, u8 *dl_pipe);

	/*
	 * Check if prior sends have completed.
	 *
	 * Check whether the pipe in question has any completed
	 * sends that have not yet been processed.
	 * This function is only relevant for HIF pipes that are configured
	 * to be polled rather than interrupt-driven.
	 */
	void (*send_complete_check)(struct ath10k *ar, u8 pipe_id, int force);

	/* number of free tx slots currently available on the pipe */
	u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id);

	/* raw register access; optional — wrappers warn when absent */
	u32 (*read32)(struct ath10k *ar, u32 address);

	void (*write32)(struct ath10k *ar, u32 address, u32 value);

	/* Power up the device and enter BMI transfer mode for FW download */
	int (*power_up)(struct ath10k *ar);

	/* Power down the device and free up resources. stop() must be called
	 * before this if start() was called earlier */
	void (*power_down)(struct ath10k *ar);

	/* optional bus-level PM hooks; wrappers return -EOPNOTSUPP if NULL */
	int (*suspend)(struct ath10k *ar);
	int (*resume)(struct ath10k *ar);
};
92
Michal Kazior726346f2014-02-27 18:50:04 +020093static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
94 struct ath10k_hif_sg_item *items,
95 int n_items)
Kalle Valo5e3dd152013-06-12 20:52:10 +030096{
Michal Kazior726346f2014-02-27 18:50:04 +020097 return ar->hif.ops->tx_sg(ar, pipe_id, items, n_items);
Kalle Valo5e3dd152013-06-12 20:52:10 +030098}
99
Kalle Valoeef25402014-09-24 14:16:52 +0300100static inline int ath10k_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
101 size_t buf_len)
102{
103 return ar->hif.ops->diag_read(ar, address, buf, buf_len);
104}
105
Yanbo Li9f65ad22014-11-25 12:24:48 +0200106static inline int ath10k_hif_diag_write(struct ath10k *ar, u32 address,
107 const void *data, int nbytes)
108{
109 if (!ar->hif.ops->diag_write)
110 return -EOPNOTSUPP;
111
112 return ar->hif.ops->diag_write(ar, address, data, nbytes);
113}
114
Kalle Valo5e3dd152013-06-12 20:52:10 +0300115static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar,
116 void *request, u32 request_len,
117 void *response, u32 *response_len)
118{
119 return ar->hif.ops->exchange_bmi_msg(ar, request, request_len,
120 response, response_len);
121}
122
123static inline int ath10k_hif_start(struct ath10k *ar)
124{
125 return ar->hif.ops->start(ar);
126}
127
128static inline void ath10k_hif_stop(struct ath10k *ar)
129{
130 return ar->hif.ops->stop(ar);
131}
132
133static inline int ath10k_hif_map_service_to_pipe(struct ath10k *ar,
134 u16 service_id,
135 u8 *ul_pipe, u8 *dl_pipe,
Rajkumar Manoharan0da64f12015-10-12 18:27:05 +0530136 int *ul_is_polled)
Kalle Valo5e3dd152013-06-12 20:52:10 +0300137{
138 return ar->hif.ops->map_service_to_pipe(ar, service_id,
139 ul_pipe, dl_pipe,
Rajkumar Manoharan0da64f12015-10-12 18:27:05 +0530140 ul_is_polled);
Kalle Valo5e3dd152013-06-12 20:52:10 +0300141}
142
143static inline void ath10k_hif_get_default_pipe(struct ath10k *ar,
144 u8 *ul_pipe, u8 *dl_pipe)
145{
146 ar->hif.ops->get_default_pipe(ar, ul_pipe, dl_pipe);
147}
148
149static inline void ath10k_hif_send_complete_check(struct ath10k *ar,
150 u8 pipe_id, int force)
151{
152 ar->hif.ops->send_complete_check(ar, pipe_id, force);
153}
154
Kalle Valo5e3dd152013-06-12 20:52:10 +0300155static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,
156 u8 pipe_id)
157{
158 return ar->hif.ops->get_free_queue_number(ar, pipe_id);
159}
160
Michal Kazior8c5c5362013-07-16 09:38:50 +0200161static inline int ath10k_hif_power_up(struct ath10k *ar)
162{
163 return ar->hif.ops->power_up(ar);
164}
165
166static inline void ath10k_hif_power_down(struct ath10k *ar)
167{
168 ar->hif.ops->power_down(ar);
169}
170
Michal Kazior8cd13ca2013-07-16 09:38:54 +0200171static inline int ath10k_hif_suspend(struct ath10k *ar)
172{
173 if (!ar->hif.ops->suspend)
174 return -EOPNOTSUPP;
175
176 return ar->hif.ops->suspend(ar);
177}
178
179static inline int ath10k_hif_resume(struct ath10k *ar)
180{
181 if (!ar->hif.ops->resume)
182 return -EOPNOTSUPP;
183
184 return ar->hif.ops->resume(ar);
185}
186
Yanbo Li077a3802014-11-25 12:24:33 +0200187static inline u32 ath10k_hif_read32(struct ath10k *ar, u32 address)
188{
189 if (!ar->hif.ops->read32) {
190 ath10k_warn(ar, "hif read32 not supported\n");
191 return 0xdeaddead;
192 }
193
194 return ar->hif.ops->read32(ar, address);
195}
196
197static inline void ath10k_hif_write32(struct ath10k *ar,
198 u32 address, u32 data)
199{
200 if (!ar->hif.ops->write32) {
201 ath10k_warn(ar, "hif write32 not supported\n");
202 return;
203 }
204
205 ar->hif.ops->write32(ar, address, data);
206}
207
Kalle Valo5e3dd152013-06-12 20:52:10 +0300208#endif /* _HIF_H_ */