/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _HIF_H_
#define _HIF_H_

#include <linux/kernel.h>
#include "core.h"
#include "debug.h"

struct ath10k_hif_sg_item {
	u16 transfer_id;
	void *transfer_context; /* NULL = tx completion callback not called */
	void *vaddr; /* for debugging mostly */
	u32 paddr;
	u16 len;
};

struct ath10k_hif_ops {
	/* send a scatter-gather list to the target */
	int (*tx_sg)(struct ath10k *ar, u8 pipe_id,
		     struct ath10k_hif_sg_item *items, int n_items);

	/* read firmware memory through the diagnostic interface */
	int (*diag_read)(struct ath10k *ar, u32 address, void *buf,
			 size_t buf_len);

	/* write firmware memory through the diagnostic interface */
	int (*diag_write)(struct ath10k *ar, u32 address, const void *data,
			  int nbytes);

	/*
	 * API to handle HIF-specific BMI message exchanges. This API is
	 * synchronous and may only be called from a context that can
	 * block (sleep).
	 */
	int (*exchange_bmi_msg)(struct ath10k *ar,
				void *request, u32 request_len,
				void *response, u32 *response_len);

	/* Post BMI phase, after FW is loaded. Starts regular operation. */
	int (*start)(struct ath10k *ar);

	/* Clean up what start() did. This does not revert to the BMI phase.
	 * If that is desired, call power_down() and then power_up(). */
	void (*stop)(struct ath10k *ar);

	int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id,
				   u8 *ul_pipe, u8 *dl_pipe);

	void (*get_default_pipe)(struct ath10k *ar, u8 *ul_pipe, u8 *dl_pipe);

	/*
	 * Check if prior sends have completed.
	 *
	 * Check whether the pipe in question has any completed
	 * sends that have not yet been processed.
	 * This function is only relevant for HIF pipes that are configured
	 * to be polled rather than interrupt-driven.
	 */
	void (*send_complete_check)(struct ath10k *ar, u8 pipe_id, int force);

	u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id);

	u32 (*read32)(struct ath10k *ar, u32 address);

	void (*write32)(struct ath10k *ar, u32 address, u32 value);

	/* Power up the device and enter BMI transfer mode for FW download */
	int (*power_up)(struct ath10k *ar);

	/* Power down the device and free up resources. stop() must be called
	 * before this if start() was called earlier. */
	void (*power_down)(struct ath10k *ar);

	int (*suspend)(struct ath10k *ar);
	int (*resume)(struct ath10k *ar);
};

static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				   struct ath10k_hif_sg_item *items,
				   int n_items)
{
	return ar->hif.ops->tx_sg(ar, pipe_id, items, n_items);
}

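/*
 * Illustrative sketch, not part of the HIF API: how a caller might describe
 * one DMA-mapped buffer and hand it to the bus layer. The skb, dma_addr, eid
 * and pipe_id names are assumptions for the example; the real caller is the
 * HTC layer.
 *
 *	struct ath10k_hif_sg_item item = {
 *		.transfer_id = eid,		// endpoint/transfer identifier
 *		.transfer_context = skb,	// non-NULL: completion callback fires
 *		.vaddr = skb->data,
 *		.paddr = dma_addr,		// bus address of skb->data
 *		.len = skb->len,
 *	};
 *	int ret;
 *
 *	ret = ath10k_hif_tx_sg(ar, pipe_id, &item, 1);
 *	if (ret)
 *		goto err;			// caller still owns the buffer
 */
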
static inline int ath10k_hif_diag_read(struct ath10k *ar, u32 address,
				       void *buf, size_t buf_len)
{
	return ar->hif.ops->diag_read(ar, address, buf, buf_len);
}

static inline int ath10k_hif_diag_write(struct ath10k *ar, u32 address,
					const void *data, int nbytes)
{
	if (!ar->hif.ops->diag_write)
		return -EOPNOTSUPP;

	return ar->hif.ops->diag_write(ar, address, data, nbytes);
}

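/*
 * Illustrative sketch, not part of the HIF API: a read-modify-write of
 * target (firmware) memory over the diagnostic window. example_addr and
 * EXAMPLE_FLAG are made-up names for the example.
 *
 *	u32 val;
 *	int ret;
 *
 *	ret = ath10k_hif_diag_read(ar, example_addr, &val, sizeof(val));
 *	if (ret)
 *		return ret;
 *
 *	val |= EXAMPLE_FLAG;
 *
 *	ret = ath10k_hif_diag_write(ar, example_addr, &val, sizeof(val));
 *	if (ret == -EOPNOTSUPP)
 *		return ret;		// bus backend has no diag_write
 */
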
static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar,
					      void *request, u32 request_len,
					      void *response, u32 *response_len)
{
	return ar->hif.ops->exchange_bmi_msg(ar, request, request_len,
					     response, response_len);
}

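/*
 * Illustrative sketch, not part of the HIF API: a blocking BMI round trip.
 * Plain byte buffers stand in for the real BMI command structures, which
 * are owned by bmi.c.
 *
 *	u8 req[16], resp[64];
 *	u32 resp_len = sizeof(resp);
 *	int ret;
 *
 *	// may sleep; only call from a context that can block
 *	ret = ath10k_hif_exchange_bmi_msg(ar, req, sizeof(req),
 *					  resp, &resp_len);
 *	if (ret)
 *		return ret;
 *	// on success resp_len is updated with the length actually received
 */
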
static inline int ath10k_hif_start(struct ath10k *ar)
{
	return ar->hif.ops->start(ar);
}

static inline void ath10k_hif_stop(struct ath10k *ar)
{
	ar->hif.ops->stop(ar);
}

static inline int ath10k_hif_map_service_to_pipe(struct ath10k *ar,
						 u16 service_id,
						 u8 *ul_pipe, u8 *dl_pipe)
{
	return ar->hif.ops->map_service_to_pipe(ar, service_id,
						ul_pipe, dl_pipe);
}

static inline void ath10k_hif_get_default_pipe(struct ath10k *ar,
					       u8 *ul_pipe, u8 *dl_pipe)
{
	ar->hif.ops->get_default_pipe(ar, ul_pipe, dl_pipe);
}

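/*
 * Illustrative sketch, not part of the HIF API: resolving which HIF pipes
 * carry a given HTC service. ATH10K_HTC_SVC_ID_WMI_CONTROL is used as an
 * example service id here.
 *
 *	u8 ul_pipe, dl_pipe;
 *	int ret;
 *
 *	// pipes used for HTC control messages (endpoint 0)
 *	ath10k_hif_get_default_pipe(ar, &ul_pipe, &dl_pipe);
 *
 *	// pipes used for a specific service
 *	ret = ath10k_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_WMI_CONTROL,
 *					     &ul_pipe, &dl_pipe);
 *	if (ret)
 *		return ret;
 */
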
static inline void ath10k_hif_send_complete_check(struct ath10k *ar,
						  u8 pipe_id, int force)
{
	ar->hif.ops->send_complete_check(ar, pipe_id, force);
}

static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,
						   u8 pipe_id)
{
	return ar->hif.ops->get_free_queue_number(ar, pipe_id);
}

static inline int ath10k_hif_power_up(struct ath10k *ar)
{
	return ar->hif.ops->power_up(ar);
}

static inline void ath10k_hif_power_down(struct ath10k *ar)
{
	ar->hif.ops->power_down(ar);
}

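/*
 * Illustrative sketch, not part of the HIF API: the bring-up and tear-down
 * order implied by the comments in struct ath10k_hif_ops. Error handling
 * and the firmware download itself are omitted.
 *
 *	ret = ath10k_hif_power_up(ar);	// enter BMI transfer mode
 *	...download firmware via BMI...
 *	ret = ath10k_hif_start(ar);	// leave BMI, begin regular operation
 *	...normal operation...
 *	ath10k_hif_stop(ar);		// undo start(); does not re-enter BMI
 *	ath10k_hif_power_down(ar);	// only after stop()
 */
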
static inline int ath10k_hif_suspend(struct ath10k *ar)
{
	if (!ar->hif.ops->suspend)
		return -EOPNOTSUPP;

	return ar->hif.ops->suspend(ar);
}

static inline int ath10k_hif_resume(struct ath10k *ar)
{
	if (!ar->hif.ops->resume)
		return -EOPNOTSUPP;

	return ar->hif.ops->resume(ar);
}

static inline u32 ath10k_hif_read32(struct ath10k *ar, u32 address)
{
	if (!ar->hif.ops->read32) {
		ath10k_warn(ar, "hif read32 not supported\n");
		return 0xdeaddead;
	}

	return ar->hif.ops->read32(ar, address);
}

static inline void ath10k_hif_write32(struct ath10k *ar,
				      u32 address, u32 data)
{
	if (!ar->hif.ops->write32) {
		ath10k_warn(ar, "hif write32 not supported\n");
		return;
	}

	ar->hif.ops->write32(ar, address, data);
}

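/*
 * Illustrative sketch, not part of the HIF API: a register read-modify-write
 * through the bus accessors. EXAMPLE_REG and EXAMPLE_ENABLE_MASK are made-up
 * names; note that not every bus backend implements read32/write32 (see the
 * warnings above).
 *
 *	u32 val;
 *
 *	val = ath10k_hif_read32(ar, EXAMPLE_REG);
 *	val |= EXAMPLE_ENABLE_MASK;
 *	ath10k_hif_write32(ar, EXAMPLE_REG, val);
 */
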
#endif /* _HIF_H_ */