/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/debugfs.h>

#include "core.h"
#include "debug.h"

/* ms */
#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000

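/* Common printk helper: every ath10k log level is routed through here so
 * that each message carries the "ath10k:" prefix.
 */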
static int ath10k_printk(const char *level, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	int rtn;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	rtn = printk("%sath10k: %pV", level, &vaf);

	va_end(args);

	return rtn;
}

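/* Driver-wide logging helpers. Each prints at its log level and also feeds
 * the matching ath10k tracepoint; ath10k_warn() is additionally ratelimited
 * on the console while the tracepoint still sees every message.
 */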
int ath10k_info(const char *fmt, ...)
{
	struct va_format vaf = {
		.fmt = fmt,
	};
	va_list args;
	int ret;

	va_start(args, fmt);
	vaf.va = &args;
	ret = ath10k_printk(KERN_INFO, "%pV", &vaf);
	trace_ath10k_log_info(&vaf);
	va_end(args);

	return ret;
}
EXPORT_SYMBOL(ath10k_info);

int ath10k_err(const char *fmt, ...)
{
	struct va_format vaf = {
		.fmt = fmt,
	};
	va_list args;
	int ret;

	va_start(args, fmt);
	vaf.va = &args;
	ret = ath10k_printk(KERN_ERR, "%pV", &vaf);
	trace_ath10k_log_err(&vaf);
	va_end(args);

	return ret;
}
EXPORT_SYMBOL(ath10k_err);

int ath10k_warn(const char *fmt, ...)
{
	struct va_format vaf = {
		.fmt = fmt,
	};
	va_list args;
	int ret = 0;

	va_start(args, fmt);
	vaf.va = &args;

	if (net_ratelimit())
		ret = ath10k_printk(KERN_WARNING, "%pV", &vaf);

	trace_ath10k_log_warn(&vaf);

	va_end(args);

	return ret;
}
EXPORT_SYMBOL(ath10k_warn);

#ifdef CONFIG_ATH10K_DEBUGFS

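/* Cache the WMI service bitmap reported by the firmware so it can be
 * dumped later through the wmi_services debugfs file.
 */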
void ath10k_debug_read_service_map(struct ath10k *ar,
				   void *service_map,
				   size_t map_size)
{
	memcpy(ar->debug.wmi_service_bitmap, service_map, map_size);
}

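/* debugfs read handler: list every known WMI service and whether the
 * firmware has it enabled.
 */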
static ssize_t ath10k_read_wmi_services(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct ath10k *ar = file->private_data;
	char *buf;
	unsigned int len = 0, buf_len = 1500;
	const char *status;
	ssize_t ret_cnt;
	int i;

	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	mutex_lock(&ar->conf_mutex);

	if (len > buf_len)
		len = buf_len;

	for (i = 0; i < WMI_SERVICE_LAST; i++) {
		if (WMI_SERVICE_IS_ENABLED(ar->debug.wmi_service_bitmap, i))
			status = "enabled";
		else
			status = "disabled";

		len += scnprintf(buf + len, buf_len - len,
				 "0x%02x - %20s - %s\n",
				 i, wmi_service_name(i), status);
	}

	ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);

	mutex_unlock(&ar->conf_mutex);

	kfree(buf);
	return ret_cnt;
}

static const struct file_operations fops_wmi_services = {
	.read = ath10k_read_wmi_services,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};

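/* Parse a WMI stats event into ar->debug.target_stats. This runs in the
 * WMI event path under ar->data_lock and wakes up any reader waiting on
 * debug.event_stats_compl.
 */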
void ath10k_debug_read_target_stats(struct ath10k *ar,
				    struct wmi_stats_event *ev)
{
	u8 *tmp = ev->data;
	struct ath10k_target_stats *stats;
	int num_pdev_stats, num_vdev_stats, num_peer_stats;
	struct wmi_pdev_stats *ps;
	int i;

	spin_lock_bh(&ar->data_lock);

	stats = &ar->debug.target_stats;

	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); /* 0 or 1 */
	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); /* 0 or max vdevs */
	num_peer_stats = __le32_to_cpu(ev->num_peer_stats); /* 0 or max peers */

	if (num_pdev_stats) {
		ps = (struct wmi_pdev_stats *)tmp;

		stats->ch_noise_floor = __le32_to_cpu(ps->chan_nf);
		stats->tx_frame_count = __le32_to_cpu(ps->tx_frame_count);
		stats->rx_frame_count = __le32_to_cpu(ps->rx_frame_count);
		stats->rx_clear_count = __le32_to_cpu(ps->rx_clear_count);
		stats->cycle_count = __le32_to_cpu(ps->cycle_count);
		stats->phy_err_count = __le32_to_cpu(ps->phy_err_count);
		stats->chan_tx_power = __le32_to_cpu(ps->chan_tx_pwr);

		stats->comp_queued = __le32_to_cpu(ps->wal.tx.comp_queued);
		stats->comp_delivered =
			__le32_to_cpu(ps->wal.tx.comp_delivered);
		stats->msdu_enqued = __le32_to_cpu(ps->wal.tx.msdu_enqued);
		stats->mpdu_enqued = __le32_to_cpu(ps->wal.tx.mpdu_enqued);
		stats->wmm_drop = __le32_to_cpu(ps->wal.tx.wmm_drop);
		stats->local_enqued = __le32_to_cpu(ps->wal.tx.local_enqued);
		stats->local_freed = __le32_to_cpu(ps->wal.tx.local_freed);
		stats->hw_queued = __le32_to_cpu(ps->wal.tx.hw_queued);
		stats->hw_reaped = __le32_to_cpu(ps->wal.tx.hw_reaped);
		stats->underrun = __le32_to_cpu(ps->wal.tx.underrun);
		stats->tx_abort = __le32_to_cpu(ps->wal.tx.tx_abort);
		stats->mpdus_requed = __le32_to_cpu(ps->wal.tx.mpdus_requed);
		stats->tx_ko = __le32_to_cpu(ps->wal.tx.tx_ko);
		stats->data_rc = __le32_to_cpu(ps->wal.tx.data_rc);
		stats->self_triggers = __le32_to_cpu(ps->wal.tx.self_triggers);
		stats->sw_retry_failure =
			__le32_to_cpu(ps->wal.tx.sw_retry_failure);
		stats->illgl_rate_phy_err =
			__le32_to_cpu(ps->wal.tx.illgl_rate_phy_err);
		stats->pdev_cont_xretry =
			__le32_to_cpu(ps->wal.tx.pdev_cont_xretry);
		stats->pdev_tx_timeout =
			__le32_to_cpu(ps->wal.tx.pdev_tx_timeout);
		stats->pdev_resets = __le32_to_cpu(ps->wal.tx.pdev_resets);
		stats->phy_underrun = __le32_to_cpu(ps->wal.tx.phy_underrun);
		stats->txop_ovf = __le32_to_cpu(ps->wal.tx.txop_ovf);

		stats->mid_ppdu_route_change =
			__le32_to_cpu(ps->wal.rx.mid_ppdu_route_change);
		stats->status_rcvd = __le32_to_cpu(ps->wal.rx.status_rcvd);
		stats->r0_frags = __le32_to_cpu(ps->wal.rx.r0_frags);
		stats->r1_frags = __le32_to_cpu(ps->wal.rx.r1_frags);
		stats->r2_frags = __le32_to_cpu(ps->wal.rx.r2_frags);
		stats->r3_frags = __le32_to_cpu(ps->wal.rx.r3_frags);
		stats->htt_msdus = __le32_to_cpu(ps->wal.rx.htt_msdus);
		stats->htt_mpdus = __le32_to_cpu(ps->wal.rx.htt_mpdus);
		stats->loc_msdus = __le32_to_cpu(ps->wal.rx.loc_msdus);
		stats->loc_mpdus = __le32_to_cpu(ps->wal.rx.loc_mpdus);
		stats->oversize_amsdu =
			__le32_to_cpu(ps->wal.rx.oversize_amsdu);
		stats->phy_errs = __le32_to_cpu(ps->wal.rx.phy_errs);
		stats->phy_err_drop = __le32_to_cpu(ps->wal.rx.phy_err_drop);
		stats->mpdu_errs = __le32_to_cpu(ps->wal.rx.mpdu_errs);

		tmp += sizeof(struct wmi_pdev_stats);
	}

	/* 0 or max vdevs */
	/* Currently firmware does not support VDEV stats */
	if (num_vdev_stats) {
		struct wmi_vdev_stats *vdev_stats;

		for (i = 0; i < num_vdev_stats; i++) {
			vdev_stats = (struct wmi_vdev_stats *)tmp;
			tmp += sizeof(struct wmi_vdev_stats);
		}
	}

	if (num_peer_stats) {
		struct wmi_peer_stats_10x *peer_stats;
		struct ath10k_peer_stat *s;

		stats->peers = num_peer_stats;

		for (i = 0; i < num_peer_stats; i++) {
			peer_stats = (struct wmi_peer_stats_10x *)tmp;
			s = &stats->peer_stat[i];

			memcpy(s->peer_macaddr, &peer_stats->peer_macaddr.addr,
			       ETH_ALEN);
			s->peer_rssi = __le32_to_cpu(peer_stats->peer_rssi);
			s->peer_tx_rate =
				__le32_to_cpu(peer_stats->peer_tx_rate);
			if (test_bit(ATH10K_FW_FEATURE_WMI_10X,
				     ar->fw_features)) {
				s->peer_rx_rate =
					__le32_to_cpu(peer_stats->peer_rx_rate);
				tmp += sizeof(struct wmi_peer_stats_10x);

			} else {
				tmp += sizeof(struct wmi_peer_stats_old);
			}
		}
	}

	spin_unlock_bh(&ar->data_lock);
	complete(&ar->debug.event_stats_compl);
}

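/* debugfs read handler for fw_stats: request a fresh snapshot over WMI,
 * wait up to one second for the stats event and then format the cached
 * target stats for userspace.
 */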
static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	struct ath10k *ar = file->private_data;
	struct ath10k_target_stats *fw_stats;
	char *buf = NULL;
	unsigned int len = 0, buf_len = 8000;
	ssize_t ret_cnt = 0;
	long left;
	int i;
	int ret;

	fw_stats = &ar->debug.target_stats;

	mutex_lock(&ar->conf_mutex);

	if (ar->state != ATH10K_STATE_ON)
		goto exit;

	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		goto exit;

	ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT);
	if (ret) {
		ath10k_warn("could not request stats (%d)\n", ret);
		goto exit;
	}

	left = wait_for_completion_timeout(&ar->debug.event_stats_compl, 1*HZ);
	if (left <= 0)
		goto exit;

	spin_lock_bh(&ar->data_lock);
	len += scnprintf(buf + len, buf_len - len, "\n");
	len += scnprintf(buf + len, buf_len - len, "%30s\n",
			 "ath10k PDEV stats");
	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
			 "=================");

	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Channel noise floor", fw_stats->ch_noise_floor);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Channel TX power", fw_stats->chan_tx_power);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "TX frame count", fw_stats->tx_frame_count);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "RX frame count", fw_stats->rx_frame_count);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "RX clear count", fw_stats->rx_clear_count);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Cycle count", fw_stats->cycle_count);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "PHY error count", fw_stats->phy_err_count);

	len += scnprintf(buf + len, buf_len - len, "\n");
	len += scnprintf(buf + len, buf_len - len, "%30s\n",
			 "ath10k PDEV TX stats");
	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
			 "=================");

	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "HTT cookies queued", fw_stats->comp_queued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "HTT cookies disp.", fw_stats->comp_delivered);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MSDU queued", fw_stats->msdu_enqued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDU queued", fw_stats->mpdu_enqued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MSDUs dropped", fw_stats->wmm_drop);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Local enqued", fw_stats->local_enqued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Local freed", fw_stats->local_freed);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "HW queued", fw_stats->hw_queued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PPDUs reaped", fw_stats->hw_reaped);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Num underruns", fw_stats->underrun);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PPDUs cleaned", fw_stats->tx_abort);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDUs requed", fw_stats->mpdus_requed);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Excessive retries", fw_stats->tx_ko);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "HW rate", fw_stats->data_rc);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Sched self triggers", fw_stats->self_triggers);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Dropped due to SW retries",
			 fw_stats->sw_retry_failure);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Illegal rate phy errors",
			 fw_stats->illgl_rate_phy_err);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Pdev continuous xretry", fw_stats->pdev_cont_xretry);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "TX timeout", fw_stats->pdev_tx_timeout);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PDEV resets", fw_stats->pdev_resets);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PHY underrun", fw_stats->phy_underrun);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDU is more than txop limit", fw_stats->txop_ovf);

	len += scnprintf(buf + len, buf_len - len, "\n");
	len += scnprintf(buf + len, buf_len - len, "%30s\n",
			 "ath10k PDEV RX stats");
	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
			 "=================");

	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Mid PPDU route change",
			 fw_stats->mid_ppdu_route_change);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Tot. number of statuses", fw_stats->status_rcvd);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Extra frags on rings 0", fw_stats->r0_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Extra frags on rings 1", fw_stats->r1_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Extra frags on rings 2", fw_stats->r2_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Extra frags on rings 3", fw_stats->r3_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MSDUs delivered to HTT", fw_stats->htt_msdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDUs delivered to HTT", fw_stats->htt_mpdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MSDUs delivered to stack", fw_stats->loc_msdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDUs delivered to stack", fw_stats->loc_mpdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Oversized AMSDUs", fw_stats->oversize_amsdu);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PHY errors", fw_stats->phy_errs);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PHY errors drops", fw_stats->phy_err_drop);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDU errors (FCS, MIC, ENC)", fw_stats->mpdu_errs);

	len += scnprintf(buf + len, buf_len - len, "\n");
	len += scnprintf(buf + len, buf_len - len, "%30s (%d)\n",
			 "ath10k PEER stats", fw_stats->peers);
	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
			 "=================");

	for (i = 0; i < fw_stats->peers; i++) {
		len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
				 "Peer MAC address",
				 fw_stats->peer_stat[i].peer_macaddr);
		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
				 "Peer RSSI", fw_stats->peer_stat[i].peer_rssi);
		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
				 "Peer TX rate",
				 fw_stats->peer_stat[i].peer_tx_rate);
		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
				 "Peer RX rate",
				 fw_stats->peer_stat[i].peer_rx_rate);
		len += scnprintf(buf + len, buf_len - len, "\n");
	}
	spin_unlock_bh(&ar->data_lock);

	if (len > buf_len)
		len = buf_len;

	ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);

exit:
	mutex_unlock(&ar->conf_mutex);
	kfree(buf);
	return ret_cnt;
}

static const struct file_operations fops_fw_stats = {
	.read = ath10k_read_fw_stats,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};

static ssize_t ath10k_read_simulate_fw_crash(struct file *file,
					     char __user *user_buf,
					     size_t count, loff_t *ppos)
{
	const char buf[] = "To simulate firmware crash write one of the"
			   " keywords to this file:\n `soft` - this will send"
			   " WMI_FORCE_FW_HANG_ASSERT to the firmware if the"
			   " firmware supports that command.\n `hard` - this"
			   " will send a command with illegal parameters to"
			   " the firmware, causing a firmware crash.\n";

	return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
}

/* Simulate firmware crash:
 * 'soft': Call a wmi command causing a firmware hang. This hang is
 * recoverable by a warm firmware reset.
 * 'hard': Force a firmware crash by setting a vdev parameter on a vdev id
 * that is not allowed. This is a hard crash because it is recoverable only
 * by a cold firmware reset.
 */
static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
					      const char __user *user_buf,
					      size_t count, loff_t *ppos)
{
	struct ath10k *ar = file->private_data;
	char buf[32];
	int ret;

	mutex_lock(&ar->conf_mutex);

	simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);

	/* make sure that buf is null terminated */
	buf[sizeof(buf) - 1] = 0;

	/* never index past what actually fits into buf */
	if (count > sizeof(buf) - 1)
		count = sizeof(buf) - 1;

	if (ar->state != ATH10K_STATE_ON &&
	    ar->state != ATH10K_STATE_RESTARTED) {
		ret = -ENETDOWN;
		goto exit;
	}

	/* drop the possible '\n' from the end */
	if (count && buf[count - 1] == '\n') {
		buf[count - 1] = 0;
		count--;
	}

	if (!strcmp(buf, "soft")) {
		ath10k_info("simulating soft firmware crash\n");
		ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
	} else if (!strcmp(buf, "hard")) {
		ath10k_info("simulating hard firmware crash\n");
		ret = ath10k_wmi_vdev_set_param(ar, TARGET_NUM_VDEVS + 1,
					ar->wmi.vdev_param->rts_threshold, 0);
	} else {
		ret = -EINVAL;
		goto exit;
	}

	if (ret) {
		ath10k_warn("failed to simulate firmware crash: %d\n", ret);
		goto exit;
	}

	ret = count;

exit:
	mutex_unlock(&ar->conf_mutex);
	return ret;
}

static const struct file_operations fops_simulate_fw_crash = {
	.read = ath10k_read_simulate_fw_crash,
	.write = ath10k_write_simulate_fw_crash,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};

static ssize_t ath10k_read_chip_id(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct ath10k *ar = file->private_data;
	unsigned int len;
	char buf[50];

	len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->chip_id);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static const struct file_operations fops_chip_id = {
	.read = ath10k_read_chip_id,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};

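/* Send one HTT stats request to the firmware and re-arm the delayed work
 * so that requests keep going out every ATH10K_DEBUG_HTT_STATS_INTERVAL ms
 * while the stats mask is non-zero and the device is up.
 */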
static int ath10k_debug_htt_stats_req(struct ath10k *ar)
{
	u64 cookie;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	if (ar->debug.htt_stats_mask == 0)
		/* htt stats are disabled */
		return 0;

	if (ar->state != ATH10K_STATE_ON)
		return 0;

	cookie = get_jiffies_64();

	ret = ath10k_htt_h2t_stats_req(&ar->htt, ar->debug.htt_stats_mask,
				       cookie);
	if (ret) {
		ath10k_warn("failed to send htt stats request: %d\n", ret);
		return ret;
	}

	queue_delayed_work(ar->workqueue, &ar->debug.htt_stats_dwork,
			   msecs_to_jiffies(ATH10K_DEBUG_HTT_STATS_INTERVAL));

	return 0;
}

static void ath10k_debug_htt_stats_dwork(struct work_struct *work)
{
	struct ath10k *ar = container_of(work, struct ath10k,
					 debug.htt_stats_dwork.work);

	mutex_lock(&ar->conf_mutex);

	ath10k_debug_htt_stats_req(ar);

	mutex_unlock(&ar->conf_mutex);
}

static ssize_t ath10k_read_htt_stats_mask(struct file *file,
					  char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct ath10k *ar = file->private_data;
	char buf[32];
	unsigned int len;

	len = scnprintf(buf, sizeof(buf), "%lu\n", ar->debug.htt_stats_mask);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

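/* Writing a non-zero mask (re)starts the periodic HTT stats requests;
 * writing 0 lets the polling stop once the pending work has run.
 */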
static ssize_t ath10k_write_htt_stats_mask(struct file *file,
					   const char __user *user_buf,
					   size_t count, loff_t *ppos)
{
	struct ath10k *ar = file->private_data;
	unsigned long mask;
	int ret;

	ret = kstrtoul_from_user(user_buf, count, 0, &mask);
	if (ret)
		return ret;

	/* max 8 bit masks (for now) */
	if (mask > 0xff)
		return -E2BIG;

	mutex_lock(&ar->conf_mutex);

	ar->debug.htt_stats_mask = mask;

	ret = ath10k_debug_htt_stats_req(ar);
	if (ret)
		goto out;

	ret = count;

out:
	mutex_unlock(&ar->conf_mutex);

	return ret;
}

static const struct file_operations fops_htt_stats_mask = {
	.read = ath10k_read_htt_stats_mask,
	.write = ath10k_write_htt_stats_mask,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};

static ssize_t ath10k_read_fw_dbglog(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct ath10k *ar = file->private_data;
	unsigned int len;
	char buf[32];

	len = scnprintf(buf, sizeof(buf), "0x%08x\n",
			ar->debug.fw_dbglog_mask);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

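/* Store the firmware dbglog mask and, if the device is already running,
 * push it to the firmware immediately; ath10k_debug_start() re-applies it
 * on the next start.
 */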
static ssize_t ath10k_write_fw_dbglog(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct ath10k *ar = file->private_data;
	unsigned long mask;
	int ret;

	ret = kstrtoul_from_user(user_buf, count, 0, &mask);
	if (ret)
		return ret;

	mutex_lock(&ar->conf_mutex);

	ar->debug.fw_dbglog_mask = mask;

	if (ar->state == ATH10K_STATE_ON) {
		ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask);
		if (ret) {
			ath10k_warn("dbglog cfg failed from debugfs: %d\n",
				    ret);
			goto exit;
		}
	}

	ret = count;

exit:
	mutex_unlock(&ar->conf_mutex);

	return ret;
}

static const struct file_operations fops_fw_dbglog = {
	.read = ath10k_read_fw_dbglog,
	.write = ath10k_write_fw_dbglog,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};

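/* Called with conf_mutex held when the device is brought up: kick off the
 * periodic HTT stats requests and re-apply any saved firmware dbglog mask.
 * Failures are logged but do not abort the start sequence.
 */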
int ath10k_debug_start(struct ath10k *ar)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	ret = ath10k_debug_htt_stats_req(ar);
	if (ret)
		/* continue normally anyway, this isn't serious */
		ath10k_warn("failed to start htt stats workqueue: %d\n", ret);

	if (ar->debug.fw_dbglog_mask) {
		ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask);
		if (ret)
			/* not serious */
			ath10k_warn("failed to enable dbglog during start: %d",
				    ret);
	}

	return 0;
}

void ath10k_debug_stop(struct ath10k *ar)
{
	lockdep_assert_held(&ar->conf_mutex);

	/* Must not use _sync to avoid deadlock, we do that in
	 * ath10k_debug_destroy(). The check for htt_stats_mask is to avoid
	 * warning from del_timer(). */
	if (ar->debug.htt_stats_mask != 0)
		cancel_delayed_work(&ar->debug.htt_stats_dwork);
}

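/* debugfs write handler for DFS testing: report a simulated radar
 * detection to mac80211.
 */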
static ssize_t ath10k_write_simulate_radar(struct file *file,
					   const char __user *user_buf,
					   size_t count, loff_t *ppos)
{
	struct ath10k *ar = file->private_data;

	ieee80211_radar_detected(ar->hw);

	return count;
}

static const struct file_operations fops_simulate_radar = {
	.write = ath10k_write_simulate_radar,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};

#define ATH10K_DFS_STAT(s, p) (\
	len += scnprintf(buf + len, size - len, "%-28s : %10u\n", s, \
			 ar->debug.dfs_stats.p))

#define ATH10K_DFS_POOL_STAT(s, p) (\
	len += scnprintf(buf + len, size - len, "%-28s : %10u\n", s, \
			 ar->debug.dfs_pool_stats.p))

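/* Dump the DFS pulse detector counters together with the global
 * pulse/sequence pool statistics.
 */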
static ssize_t ath10k_read_dfs_stats(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	int retval = 0, len = 0;
	const int size = 8000;
	struct ath10k *ar = file->private_data;
	char *buf;

	buf = kzalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	if (!ar->dfs_detector) {
		len += scnprintf(buf + len, size - len, "DFS not enabled\n");
		goto exit;
	}

	ar->debug.dfs_pool_stats =
			ar->dfs_detector->get_stats(ar->dfs_detector);

	len += scnprintf(buf + len, size - len, "Pulse detector statistics:\n");

	ATH10K_DFS_STAT("reported phy errors", phy_errors);
	ATH10K_DFS_STAT("pulse events reported", pulses_total);
	ATH10K_DFS_STAT("DFS pulses detected", pulses_detected);
	ATH10K_DFS_STAT("DFS pulses discarded", pulses_discarded);
	ATH10K_DFS_STAT("Radars detected", radar_detected);

	len += scnprintf(buf + len, size - len, "Global Pool statistics:\n");
	ATH10K_DFS_POOL_STAT("Pool references", pool_reference);
	ATH10K_DFS_POOL_STAT("Pulses allocated", pulse_allocated);
	ATH10K_DFS_POOL_STAT("Pulses alloc error", pulse_alloc_error);
	ATH10K_DFS_POOL_STAT("Pulses in use", pulse_used);
	ATH10K_DFS_POOL_STAT("Seqs. allocated", pseq_allocated);
	ATH10K_DFS_POOL_STAT("Seqs. alloc error", pseq_alloc_error);
	ATH10K_DFS_POOL_STAT("Seqs. in use", pseq_used);

exit:
	if (len > size)
		len = size;

	retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
	kfree(buf);

	return retval;
}

static const struct file_operations fops_dfs_stats = {
	.read = ath10k_read_dfs_stats,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};

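/* Create the per-device "ath10k" debugfs directory and all of its entries.
 * The DFS files are only created when CONFIG_ATH10K_DFS_CERTIFIED is
 * enabled.
 */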
int ath10k_debug_create(struct ath10k *ar)
{
	ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
						   ar->hw->wiphy->debugfsdir);

	if (!ar->debug.debugfs_phy)
		return -ENOMEM;

	INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork,
			  ath10k_debug_htt_stats_dwork);

	init_completion(&ar->debug.event_stats_compl);

	debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
			    &fops_fw_stats);

	debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar,
			    &fops_wmi_services);

	debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy,
			    ar, &fops_simulate_fw_crash);

	debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy,
			    ar, &fops_chip_id);

	debugfs_create_file("htt_stats_mask", S_IRUSR, ar->debug.debugfs_phy,
			    ar, &fops_htt_stats_mask);

	debugfs_create_file("fw_dbglog", S_IRUSR, ar->debug.debugfs_phy,
			    ar, &fops_fw_dbglog);

	if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
		debugfs_create_file("dfs_simulate_radar", S_IWUSR,
				    ar->debug.debugfs_phy, ar,
				    &fops_simulate_radar);

		debugfs_create_bool("dfs_block_radar_events", S_IWUSR,
				    ar->debug.debugfs_phy,
				    &ar->dfs_block_radar_events);

		debugfs_create_file("dfs_stats", S_IRUSR,
				    ar->debug.debugfs_phy, ar,
				    &fops_dfs_stats);
	}

	return 0;
}

void ath10k_debug_destroy(struct ath10k *ar)
{
	cancel_delayed_work_sync(&ar->debug.htt_stats_dwork);
}

#endif /* CONFIG_ATH10K_DEBUGFS */

#ifdef CONFIG_ATH10K_DEBUG
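/* Debug-mask gated logging: print only when the message's mask bit is set
 * in the global ath10k_debug_mask, but always feed the tracepoint.
 */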
void ath10k_dbg(enum ath10k_debug_mask mask, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (ath10k_debug_mask & mask)
		ath10k_printk(KERN_DEBUG, "%pV", &vaf);

	trace_ath10k_log_dbg(mask, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(ath10k_dbg);

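/* Like ath10k_dbg() but additionally hex-dumps a buffer when the mask bit
 * is enabled; the raw buffer is always handed to the tracepoint.
 */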
void ath10k_dbg_dump(enum ath10k_debug_mask mask,
		     const char *msg, const char *prefix,
		     const void *buf, size_t len)
{
	if (ath10k_debug_mask & mask) {
		if (msg)
			ath10k_dbg(mask, "%s\n", msg);

		print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len);
	}

	/* tracing code doesn't like null strings :/ */
	trace_ath10k_log_dbg_dump(msg ? msg : "", prefix ? prefix : "",
				  buf, len);
}
EXPORT_SYMBOL(ath10k_dbg_dump);

#endif /* CONFIG_ATH10K_DEBUG */