/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/debugfs.h>

#include "core.h"
#include "debug.h"

/* ms */
#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000

static int ath10k_printk(const char *level, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	int rtn;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	rtn = printk("%sath10k: %pV", level, &vaf);

	va_end(args);

	return rtn;
}

int ath10k_info(const char *fmt, ...)
{
	struct va_format vaf = {
		.fmt = fmt,
	};
	va_list args;
	int ret;

	va_start(args, fmt);
	vaf.va = &args;
	ret = ath10k_printk(KERN_INFO, "%pV", &vaf);
	trace_ath10k_log_info(&vaf);
	va_end(args);

	return ret;
}
EXPORT_SYMBOL(ath10k_info);

int ath10k_err(const char *fmt, ...)
{
	struct va_format vaf = {
		.fmt = fmt,
	};
	va_list args;
	int ret;

	va_start(args, fmt);
	vaf.va = &args;
	ret = ath10k_printk(KERN_ERR, "%pV", &vaf);
	trace_ath10k_log_err(&vaf);
	va_end(args);

	return ret;
}
EXPORT_SYMBOL(ath10k_err);

int ath10k_warn(const char *fmt, ...)
{
	struct va_format vaf = {
		.fmt = fmt,
	};
	va_list args;
	int ret = 0;

	va_start(args, fmt);
	vaf.va = &args;

	if (net_ratelimit())
		ret = ath10k_printk(KERN_WARNING, "%pV", &vaf);

	trace_ath10k_log_warn(&vaf);

	va_end(args);

	return ret;
}
EXPORT_SYMBOL(ath10k_warn);

#ifdef CONFIG_ATH10K_DEBUGFS

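/* Cache the WMI service bitmap reported by firmware so it can later be
 * dumped through the wmi_services debugfs file. */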
void ath10k_debug_read_service_map(struct ath10k *ar,
				   void *service_map,
				   size_t map_size)
{
	memcpy(ar->debug.wmi_service_bitmap, service_map, map_size);
}

static ssize_t ath10k_read_wmi_services(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct ath10k *ar = file->private_data;
	char *buf;
	unsigned int len = 0, buf_len = 1500;
	const char *status;
	ssize_t ret_cnt;
	int i;

	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	mutex_lock(&ar->conf_mutex);

	if (len > buf_len)
		len = buf_len;

	for (i = 0; i < WMI_SERVICE_LAST; i++) {
		if (WMI_SERVICE_IS_ENABLED(ar->debug.wmi_service_bitmap, i))
			status = "enabled";
		else
			status = "disabled";

		len += scnprintf(buf + len, buf_len - len,
				 "0x%02x - %20s - %s\n",
				 i, wmi_service_name(i), status);
	}

	ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);

	mutex_unlock(&ar->conf_mutex);

	kfree(buf);
	return ret_cnt;
}

static const struct file_operations fops_wmi_services = {
	.read = ath10k_read_wmi_services,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};

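/* Parse a WMI stats event payload into ar->debug.target_stats under
 * data_lock and wake up any reader blocked in ath10k_read_fw_stats(). */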
void ath10k_debug_read_target_stats(struct ath10k *ar,
				    struct wmi_stats_event *ev)
{
	u8 *tmp = ev->data;
	struct ath10k_target_stats *stats;
	int num_pdev_stats, num_vdev_stats, num_peer_stats;
	struct wmi_pdev_stats *ps;
	int i;

	spin_lock_bh(&ar->data_lock);

	stats = &ar->debug.target_stats;

	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); /* 0 or 1 */
	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); /* 0 or max vdevs */
	num_peer_stats = __le32_to_cpu(ev->num_peer_stats); /* 0 or max peers */

	if (num_pdev_stats) {
		ps = (struct wmi_pdev_stats *)tmp;

		stats->ch_noise_floor = __le32_to_cpu(ps->chan_nf);
		stats->tx_frame_count = __le32_to_cpu(ps->tx_frame_count);
		stats->rx_frame_count = __le32_to_cpu(ps->rx_frame_count);
		stats->rx_clear_count = __le32_to_cpu(ps->rx_clear_count);
		stats->cycle_count = __le32_to_cpu(ps->cycle_count);
		stats->phy_err_count = __le32_to_cpu(ps->phy_err_count);
		stats->chan_tx_power = __le32_to_cpu(ps->chan_tx_pwr);

		stats->comp_queued = __le32_to_cpu(ps->wal.tx.comp_queued);
		stats->comp_delivered =
			__le32_to_cpu(ps->wal.tx.comp_delivered);
		stats->msdu_enqued = __le32_to_cpu(ps->wal.tx.msdu_enqued);
		stats->mpdu_enqued = __le32_to_cpu(ps->wal.tx.mpdu_enqued);
		stats->wmm_drop = __le32_to_cpu(ps->wal.tx.wmm_drop);
		stats->local_enqued = __le32_to_cpu(ps->wal.tx.local_enqued);
		stats->local_freed = __le32_to_cpu(ps->wal.tx.local_freed);
		stats->hw_queued = __le32_to_cpu(ps->wal.tx.hw_queued);
		stats->hw_reaped = __le32_to_cpu(ps->wal.tx.hw_reaped);
		stats->underrun = __le32_to_cpu(ps->wal.tx.underrun);
		stats->tx_abort = __le32_to_cpu(ps->wal.tx.tx_abort);
		stats->mpdus_requed = __le32_to_cpu(ps->wal.tx.mpdus_requed);
		stats->tx_ko = __le32_to_cpu(ps->wal.tx.tx_ko);
		stats->data_rc = __le32_to_cpu(ps->wal.tx.data_rc);
		stats->self_triggers = __le32_to_cpu(ps->wal.tx.self_triggers);
		stats->sw_retry_failure =
			__le32_to_cpu(ps->wal.tx.sw_retry_failure);
		stats->illgl_rate_phy_err =
			__le32_to_cpu(ps->wal.tx.illgl_rate_phy_err);
		stats->pdev_cont_xretry =
			__le32_to_cpu(ps->wal.tx.pdev_cont_xretry);
		stats->pdev_tx_timeout =
			__le32_to_cpu(ps->wal.tx.pdev_tx_timeout);
		stats->pdev_resets = __le32_to_cpu(ps->wal.tx.pdev_resets);
		stats->phy_underrun = __le32_to_cpu(ps->wal.tx.phy_underrun);
		stats->txop_ovf = __le32_to_cpu(ps->wal.tx.txop_ovf);

		stats->mid_ppdu_route_change =
			__le32_to_cpu(ps->wal.rx.mid_ppdu_route_change);
		stats->status_rcvd = __le32_to_cpu(ps->wal.rx.status_rcvd);
		stats->r0_frags = __le32_to_cpu(ps->wal.rx.r0_frags);
		stats->r1_frags = __le32_to_cpu(ps->wal.rx.r1_frags);
		stats->r2_frags = __le32_to_cpu(ps->wal.rx.r2_frags);
		stats->r3_frags = __le32_to_cpu(ps->wal.rx.r3_frags);
		stats->htt_msdus = __le32_to_cpu(ps->wal.rx.htt_msdus);
		stats->htt_mpdus = __le32_to_cpu(ps->wal.rx.htt_mpdus);
		stats->loc_msdus = __le32_to_cpu(ps->wal.rx.loc_msdus);
		stats->loc_mpdus = __le32_to_cpu(ps->wal.rx.loc_mpdus);
		stats->oversize_amsdu =
			__le32_to_cpu(ps->wal.rx.oversize_amsdu);
		stats->phy_errs = __le32_to_cpu(ps->wal.rx.phy_errs);
		stats->phy_err_drop = __le32_to_cpu(ps->wal.rx.phy_err_drop);
		stats->mpdu_errs = __le32_to_cpu(ps->wal.rx.mpdu_errs);

		tmp += sizeof(struct wmi_pdev_stats);
	}

	/* 0 or max vdevs */
	/* Currently firmware does not support VDEV stats */
	if (num_vdev_stats) {
		struct wmi_vdev_stats *vdev_stats;

		for (i = 0; i < num_vdev_stats; i++) {
			vdev_stats = (struct wmi_vdev_stats *)tmp;
			tmp += sizeof(struct wmi_vdev_stats);
		}
	}

	if (num_peer_stats) {
		struct wmi_peer_stats *peer_stats;
		struct ath10k_peer_stat *s;

		stats->peers = num_peer_stats;

		for (i = 0; i < num_peer_stats; i++) {
			peer_stats = (struct wmi_peer_stats *)tmp;
			s = &stats->peer_stat[i];

			WMI_MAC_ADDR_TO_CHAR_ARRAY(&peer_stats->peer_macaddr,
						   s->peer_macaddr);
			s->peer_rssi = __le32_to_cpu(peer_stats->peer_rssi);
			s->peer_tx_rate =
				__le32_to_cpu(peer_stats->peer_tx_rate);

			tmp += sizeof(struct wmi_peer_stats);
		}
	}

	spin_unlock_bh(&ar->data_lock);
	complete(&ar->debug.event_stats_compl);
}

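/* Request a fresh stats snapshot from firmware over WMI, wait (up to one
 * second) for ath10k_debug_read_target_stats() to fill
 * ar->debug.target_stats, then format it for userspace. */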
static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	struct ath10k *ar = file->private_data;
	struct ath10k_target_stats *fw_stats;
	char *buf = NULL;
	unsigned int len = 0, buf_len = 2500;
	ssize_t ret_cnt = 0;
	long left;
	int i;
	int ret;

	fw_stats = &ar->debug.target_stats;

	mutex_lock(&ar->conf_mutex);

	if (ar->state != ATH10K_STATE_ON)
		goto exit;

	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		goto exit;

	ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT);
	if (ret) {
		ath10k_warn("could not request stats (%d)\n", ret);
		goto exit;
	}

	left = wait_for_completion_timeout(&ar->debug.event_stats_compl, 1*HZ);
	if (left <= 0)
		goto exit;

	spin_lock_bh(&ar->data_lock);
	len += scnprintf(buf + len, buf_len - len, "\n");
	len += scnprintf(buf + len, buf_len - len, "%30s\n",
			 "ath10k PDEV stats");
	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
			 "=================");

	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Channel noise floor", fw_stats->ch_noise_floor);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Channel TX power", fw_stats->chan_tx_power);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "TX frame count", fw_stats->tx_frame_count);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "RX frame count", fw_stats->rx_frame_count);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "RX clear count", fw_stats->rx_clear_count);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Cycle count", fw_stats->cycle_count);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "PHY error count", fw_stats->phy_err_count);

	len += scnprintf(buf + len, buf_len - len, "\n");
	len += scnprintf(buf + len, buf_len - len, "%30s\n",
			 "ath10k PDEV TX stats");
	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
			 "=================");

	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "HTT cookies queued", fw_stats->comp_queued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "HTT cookies disp.", fw_stats->comp_delivered);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MSDU queued", fw_stats->msdu_enqued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDU queued", fw_stats->mpdu_enqued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MSDUs dropped", fw_stats->wmm_drop);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Local enqued", fw_stats->local_enqued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Local freed", fw_stats->local_freed);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "HW queued", fw_stats->hw_queued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PPDUs reaped", fw_stats->hw_reaped);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Num underruns", fw_stats->underrun);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PPDUs cleaned", fw_stats->tx_abort);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDUs requed", fw_stats->mpdus_requed);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Excessive retries", fw_stats->tx_ko);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "HW rate", fw_stats->data_rc);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Sched self triggers", fw_stats->self_triggers);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Dropped due to SW retries",
			 fw_stats->sw_retry_failure);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Illegal rate phy errors",
			 fw_stats->illgl_rate_phy_err);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Pdev continuous xretry", fw_stats->pdev_cont_xretry);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "TX timeout", fw_stats->pdev_tx_timeout);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PDEV resets", fw_stats->pdev_resets);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PHY underrun", fw_stats->phy_underrun);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDU is more than txop limit", fw_stats->txop_ovf);

	len += scnprintf(buf + len, buf_len - len, "\n");
	len += scnprintf(buf + len, buf_len - len, "%30s\n",
			 "ath10k PDEV RX stats");
	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
			 "=================");

	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Mid PPDU route change",
			 fw_stats->mid_ppdu_route_change);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Tot. number of statuses", fw_stats->status_rcvd);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Extra frags on ring 0", fw_stats->r0_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Extra frags on ring 1", fw_stats->r1_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Extra frags on ring 2", fw_stats->r2_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Extra frags on ring 3", fw_stats->r3_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MSDUs delivered to HTT", fw_stats->htt_msdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDUs delivered to HTT", fw_stats->htt_mpdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MSDUs delivered to stack", fw_stats->loc_msdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDUs delivered to stack", fw_stats->loc_mpdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Oversized AMSDUs", fw_stats->oversize_amsdu);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PHY errors", fw_stats->phy_errs);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PHY error drops", fw_stats->phy_err_drop);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDU errors (FCS, MIC, ENC)", fw_stats->mpdu_errs);

	len += scnprintf(buf + len, buf_len - len, "\n");
	len += scnprintf(buf + len, buf_len - len, "%30s\n",
			 "ath10k PEER stats");
	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
			 "=================");

	for (i = 0; i < fw_stats->peers; i++) {
		len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
				 "Peer MAC address",
				 fw_stats->peer_stat[i].peer_macaddr);
		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
				 "Peer RSSI", fw_stats->peer_stat[i].peer_rssi);
		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
				 "Peer TX rate",
				 fw_stats->peer_stat[i].peer_tx_rate);
		len += scnprintf(buf + len, buf_len - len, "\n");
	}
	spin_unlock_bh(&ar->data_lock);

	if (len > buf_len)
		len = buf_len;

	ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);

exit:
	mutex_unlock(&ar->conf_mutex);
	kfree(buf);
	return ret_cnt;
}

static const struct file_operations fops_fw_stats = {
	.read = ath10k_read_fw_stats,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};

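/* Writing the keyword "crash" to this file asks firmware to assert so that
 * host-side recovery can be exercised. A typical invocation (the exact path
 * depends on the debugfs mount point and phy name) would be:
 *
 *   echo crash > /sys/kernel/debug/ieee80211/phy0/ath10k/simulate_fw_crash
 */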
static ssize_t ath10k_read_simulate_fw_crash(struct file *file,
					     char __user *user_buf,
					     size_t count, loff_t *ppos)
{
	const char buf[] = "To simulate firmware crash write the keyword"
			   " `crash` to this file.\nThis will force firmware"
			   " to report a crash to the host system.\n";
	return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
}

static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
					      const char __user *user_buf,
					      size_t count, loff_t *ppos)
{
	struct ath10k *ar = file->private_data;
	char buf[32] = {};
	int ret;

	mutex_lock(&ar->conf_mutex);

	simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
	if (strcmp(buf, "crash") && strcmp(buf, "crash\n")) {
		ret = -EINVAL;
		goto exit;
	}

	if (ar->state != ATH10K_STATE_ON &&
	    ar->state != ATH10K_STATE_RESTARTED) {
		ret = -ENETDOWN;
		goto exit;
	}

	ath10k_info("simulating firmware crash\n");

	ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
	if (ret)
		ath10k_warn("failed to force fw hang (%d)\n", ret);

	if (ret == 0)
		ret = count;

exit:
	mutex_unlock(&ar->conf_mutex);
	return ret;
}

static const struct file_operations fops_simulate_fw_crash = {
	.read = ath10k_read_simulate_fw_crash,
	.write = ath10k_write_simulate_fw_crash,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};

503
Kalle Valo763b8cd2013-09-01 11:22:21 +0300504static ssize_t ath10k_read_chip_id(struct file *file, char __user *user_buf,
505 size_t count, loff_t *ppos)
506{
507 struct ath10k *ar = file->private_data;
508 unsigned int len;
509 char buf[50];
510
511 len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->chip_id);
512
513 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
514}
515
516static const struct file_operations fops_chip_id = {
517 .read = ath10k_read_chip_id,
518 .open = simple_open,
519 .owner = THIS_MODULE,
520 .llseek = default_llseek,
521};
522
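/* Send a single HTT stats request and, while the mask stays non-zero and the
 * device is up, re-arm the delayed work so the request repeats every
 * ATH10K_DEBUG_HTT_STATS_INTERVAL milliseconds. */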
static int ath10k_debug_htt_stats_req(struct ath10k *ar)
{
	u64 cookie;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	if (ar->debug.htt_stats_mask == 0)
		/* htt stats are disabled */
		return 0;

	if (ar->state != ATH10K_STATE_ON)
		return 0;

	cookie = get_jiffies_64();

	ret = ath10k_htt_h2t_stats_req(&ar->htt, ar->debug.htt_stats_mask,
				       cookie);
	if (ret) {
		ath10k_warn("failed to send htt stats request: %d\n", ret);
		return ret;
	}

	queue_delayed_work(ar->workqueue, &ar->debug.htt_stats_dwork,
			   msecs_to_jiffies(ATH10K_DEBUG_HTT_STATS_INTERVAL));

	return 0;
}

static void ath10k_debug_htt_stats_dwork(struct work_struct *work)
{
	struct ath10k *ar = container_of(work, struct ath10k,
					 debug.htt_stats_dwork.work);

	mutex_lock(&ar->conf_mutex);

	ath10k_debug_htt_stats_req(ar);

	mutex_unlock(&ar->conf_mutex);
}

static ssize_t ath10k_read_htt_stats_mask(struct file *file,
					  char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct ath10k *ar = file->private_data;
	char buf[32];
	unsigned int len;

	len = scnprintf(buf, sizeof(buf), "%lu\n", ar->debug.htt_stats_mask);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

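/* Accepts an HTT stats type bitmask; values above 0xff are rejected for now.
 * Assuming the usual debugfs layout, something like
 *   echo 0xff > /sys/kernel/debug/ieee80211/phy0/ath10k/htt_stats_mask
 * starts the periodic requests, and writing 0 stops them. */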
static ssize_t ath10k_write_htt_stats_mask(struct file *file,
					   const char __user *user_buf,
					   size_t count, loff_t *ppos)
{
	struct ath10k *ar = file->private_data;
	unsigned long mask;
	int ret;

	ret = kstrtoul_from_user(user_buf, count, 0, &mask);
	if (ret)
		return ret;

	/* max 8 bit masks (for now) */
	if (mask > 0xff)
		return -E2BIG;

	mutex_lock(&ar->conf_mutex);

	ar->debug.htt_stats_mask = mask;

	ret = ath10k_debug_htt_stats_req(ar);
	if (ret)
		goto out;

	ret = count;

out:
	mutex_unlock(&ar->conf_mutex);

	return ret;
}

static const struct file_operations fops_htt_stats_mask = {
	.read = ath10k_read_htt_stats_mask,
	.write = ath10k_write_htt_stats_mask,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};

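/* Called with conf_mutex held around device start/stop; these hooks only
 * manage the periodic HTT stats work, so a failure here is not fatal. */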
int ath10k_debug_start(struct ath10k *ar)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	ret = ath10k_debug_htt_stats_req(ar);
	if (ret)
		/* continue normally anyway, this isn't serious */
		ath10k_warn("failed to start htt stats workqueue: %d\n", ret);

	return 0;
}

void ath10k_debug_stop(struct ath10k *ar)
{
	lockdep_assert_held(&ar->conf_mutex);

	/* Must not use the _sync variant here to avoid a deadlock; the
	 * synchronous cancel is done in ath10k_debug_destroy(). The check
	 * for htt_stats_mask avoids a warning from del_timer(). */
	if (ar->debug.htt_stats_mask != 0)
		cancel_delayed_work(&ar->debug.htt_stats_dwork);
}

int ath10k_debug_create(struct ath10k *ar)
{
	ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
						   ar->hw->wiphy->debugfsdir);

	if (!ar->debug.debugfs_phy)
		return -ENOMEM;

	INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork,
			  ath10k_debug_htt_stats_dwork);

	init_completion(&ar->debug.event_stats_compl);

	debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
			    &fops_fw_stats);

	debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar,
			    &fops_wmi_services);

	debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy,
			    ar, &fops_simulate_fw_crash);

	debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy,
			    ar, &fops_chip_id);

	debugfs_create_file("htt_stats_mask", S_IRUSR, ar->debug.debugfs_phy,
			    ar, &fops_htt_stats_mask);

	return 0;
}

void ath10k_debug_destroy(struct ath10k *ar)
{
	cancel_delayed_work_sync(&ar->debug.htt_stats_dwork);
}

#endif /* CONFIG_ATH10K_DEBUGFS */

#ifdef CONFIG_ATH10K_DEBUG
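/* Debug messages are printed to the console only when the matching bit is
 * set in ath10k_debug_mask (the driver's debug_mask module parameter), but
 * they are always passed to the tracing hooks. */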
void ath10k_dbg(enum ath10k_debug_mask mask, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (ath10k_debug_mask & mask)
		ath10k_printk(KERN_DEBUG, "%pV", &vaf);

	trace_ath10k_log_dbg(mask, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(ath10k_dbg);

void ath10k_dbg_dump(enum ath10k_debug_mask mask,
		     const char *msg, const char *prefix,
		     const void *buf, size_t len)
{
	if (ath10k_debug_mask & mask) {
		if (msg)
			ath10k_dbg(mask, "%s\n", msg);

		print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len);
	}

	/* tracing code doesn't like null strings :/ */
	trace_ath10k_log_dbg_dump(msg ? msg : "", prefix ? prefix : "",
				  buf, len);
}
EXPORT_SYMBOL(ath10k_dbg_dump);

#endif /* CONFIG_ATH10K_DEBUG */