/*
 * Copyright (c) 2002-2015 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/*===========================================================================

   dfs.c

   OVERVIEW:

   Source code borrowed from QCA_MAIN DFS module

   DEPENDENCIES:

   Are listed for each API below.

   ===========================================================================*/

/*===========================================================================

   EDIT HISTORY FOR FILE

   This section contains comments describing changes made to the module.
   Notice that changes are listed in reverse chronological order.

   when        who     what, where, why
   ----------  ---     ------------------------------------------------------

   ===========================================================================*/

#include <osdep.h>

#ifndef ATH_SUPPORT_DFS
#define ATH_SUPPORT_DFS 1

/* #include "if_athioctl.h" */
/* #include "if_athvar.h" */
#include "dfs_ioctl.h"
#include "dfs.h"

int domainoverride = DFS_UNINIT_DOMAIN;

/*
** channel switch announcement (CSA)
** usenol=1 (default): send a CSA and switch to a new channel on radar detect
** usenol=0: send a CSA, but keep the next channel the same as the current one
** usenol=2: no CSA; stay on the same channel on radar detect
**/

int usenol = 1;
uint32_t dfs_debug_level = ATH_DEBUG_DFS;

#if 0 /* the code to call this is currently commented-out below */
/*
 * Mark a channel as having interference detected upon it.
 *
 * This adds the interference marker to both the primary and
 * extension channel.
 *
 * XXX TODO: make the NOL and channel interference logic a bit smarter
 * so only the channel with the radar event is marked, rather than
 * both the primary and extension.
 */
static void
dfs_channel_mark_radar(struct ath_dfs *dfs, struct ieee80211_channel *chan)
{
        struct ieee80211_channel_list chan_info;
        int i;

        /* chan->ic_flagext |= CHANNEL_INTERFERENCE; */

        /*
         * If radar is detected in 40MHz mode, add both the primary and the
         * extension channels to the NOL. chan is the channel data we return
         * to the ath_dev layer which passes it on to the 80211 layer.
         * As we want the AP to change channels and send out a CSA,
         * we always pass back the primary channel data to the ath_dev layer.
         */
        if ((dfs->dfs_rinfo.rn_use_nol == 1) &&
            (dfs->ic->ic_opmode == IEEE80211_M_HOSTAP ||
             dfs->ic->ic_opmode == IEEE80211_M_IBSS)) {
                chan_info.cl_nchans = 0;
                dfs->ic->ic_get_ext_chan_info(dfs->ic, &chan_info);

                for (i = 0; i < chan_info.cl_nchans; i++) {
                        if (chan_info.cl_channels[i] == NULL) {
                                DFS_PRINTK("%s: NULL channel\n", __func__);
                        } else {
                                chan_info.cl_channels[i]->ic_flagext |=
                                        CHANNEL_INTERFERENCE;
                                dfs_nol_addchan(dfs, chan_info.cl_channels[i],
                                                dfs->ath_dfs_nol_timeout);
                        }
                }

                /*
                 * Update the umac/driver channels with the new NOL information.
                 */
                dfs_nol_update(dfs);
        }
}
#endif /* #if 0 */

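/*
 * dfs_task() - radar task timer handler.
 *
 * Runs dfs_process_radarevent() against the current channel.  On a
 * confirmed radar match it either notifies the umac layer via
 * ic_dfs_notify_radar() so the CSA/channel-change machinery can start
 * (rn_use_nol == 1), or, in test mode (rn_use_nol == 0), arms
 * ath_dfstesttimer so a CSA back to the original channel follows.
 */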
static os_timer_func(dfs_task)
{
        struct ieee80211com *ic;
        struct ath_dfs *dfs = NULL;

        OS_GET_TIMER_ARG(ic, struct ieee80211com *);
        dfs = (struct ath_dfs *)ic->ic_dfs;
        /*
         * XXX no locking?!
         */
        if (dfs_process_radarevent(dfs, ic->ic_curchan)) {
#ifndef ATH_DFS_RADAR_DETECTION_ONLY

                /*
                 * This marks the channel (and the extension channel, if HT40)
                 * as having seen a radar event. It marks CHAN_INTERFERENCE and
                 * will add it to the local NOL implementation.
                 *
                 * This is only done for 'usenol=1', as the other two modes
                 * don't do radar notification or CAC/CSA/NOL; it just notes
                 * there was a radar.
                 */

                if (dfs->dfs_rinfo.rn_use_nol == 1) {
                        /* dfs_channel_mark_radar(dfs, ic->ic_curchan); */
                }
#endif /* ATH_DFS_RADAR_DETECTION_ONLY */

                /*
                 * This calls into the umac DFS code, which sets the umac
                 * related radar flags and begins the channel change machinery.
                 *
                 * XXX TODO: the umac NOL code isn't used, but
                 * IEEE80211_CHAN_RADAR still gets set. Since the umac NOL code
                 * isn't used, that flag is never cleared. This needs to be
                 * fixed. See EV 105776.
                 */
                if (dfs->dfs_rinfo.rn_use_nol == 1) {
                        ic->ic_dfs_notify_radar(ic, ic->ic_curchan);
                } else if (dfs->dfs_rinfo.rn_use_nol == 0) {
                        /*
                         * For the test mode, don't do a CSA here; but setup
                         * the test timer so we get a CSA _back_ to the
                         * original channel.
                         */
                        OS_CANCEL_TIMER(&dfs->ath_dfstesttimer);
                        dfs->ath_dfstest = 1;
                        dfs->ath_dfstest_ieeechan = ic->ic_curchan->ic_ieee;
                        dfs->ath_dfstesttime = 1;       /* 1ms */
                        OS_SET_TIMER(&dfs->ath_dfstesttimer,
                                     dfs->ath_dfstesttime);
                }
        }
        dfs->ath_radar_tasksched = 0;
}

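/*
 * dfs_testtimer_task() - test-mode timer handler.
 *
 * Clears the test flag and asks the umac layer to start a CSA back to the
 * channel recorded in ath_dfstest_ieeechan when the mute interval expires.
 */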
static os_timer_func(dfs_testtimer_task)
{
        struct ieee80211com *ic;
        struct ath_dfs *dfs = NULL;

        OS_GET_TIMER_ARG(ic, struct ieee80211com *);
        dfs = (struct ath_dfs *)ic->ic_dfs;

        /* XXX no locking? */
        dfs->ath_dfstest = 0;

        /*
         * Flip the channel back to the original channel.
         * Make sure this is done properly with a CSA.
         */
        DFS_PRINTK("%s: go back to channel %d\n",
                   __func__, dfs->ath_dfstest_ieeechan);

        /*
         * XXX The mere existence of this method indirection
         * to a umac function means this code belongs in
         * the driver, _not_ here. Please fix this!
         */
        ic->ic_start_csa(ic, dfs->ath_dfstest_ieeechan);
}

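/*
 * dfs_get_debug_info() - report the phyerr processing mask.
 *
 * If the caller supplies a buffer, the current dfs_proc_phyerr mask is
 * copied into it; the mask is also returned directly.
 */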
static int dfs_get_debug_info(struct ieee80211com *ic, int type, void *data)
{
        struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;

        if (data) {
                *(uint32_t *) data = dfs->dfs_proc_phyerr;
        }
        return (int)dfs->dfs_proc_phyerr;
}

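/*
 * dfs_attach() - allocate and initialize the per-device DFS state.
 *
 * Allocates the ath_dfs structure, the radar/pulse event queues, the radar
 * filter types and the radar lookup table, arms the radar and test timers,
 * and seeds the detection thresholds from the initial regulatory
 * configuration.  Returns 0 on success and 1 on failure (already attached,
 * allocation failure, or radar filter initialization failure).
 */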
int dfs_attach(struct ieee80211com *ic)
{
        int i, n;
        struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
        struct ath_dfs_radar_tab_info radar_info;

        if (dfs != NULL) {
                /*DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
                   "%s: ic_dfs was not NULL\n",
                   __func__);
                 */
                return 1;
        }

        dfs = (struct ath_dfs *)os_malloc(NULL, sizeof(struct ath_dfs),
                                          GFP_ATOMIC);

        if (dfs == NULL) {
                /*DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
                   "%s: ath_dfs allocation failed\n", __func__); */
                return 1;
        }

        OS_MEMZERO(dfs, sizeof(struct ath_dfs));

        ic->ic_dfs = (void *)dfs;

        dfs->ic = ic;

        ic->ic_dfs_debug = dfs_get_debug_info;
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
        dfs->dfs_nol = NULL;
#endif

        /*
         * Zero out radar_info. It's possible that the attach function won't
         * fetch an initial regulatory configuration; you really do want to
         * ensure that the contents indicates there aren't any filters.
         */
        OS_MEMZERO(&radar_info, sizeof(radar_info));
        ic->ic_dfs_attach(ic, &dfs->dfs_caps, &radar_info);
        dfs_clear_stats(ic);
        dfs->dfs_event_log_on = 0;
        OS_INIT_TIMER(NULL, &(dfs->ath_dfs_task_timer), dfs_task, (void *)(ic),
                      CDF_TIMER_TYPE_SW);
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
        OS_INIT_TIMER(NULL, &(dfs->ath_dfstesttimer), dfs_testtimer_task,
                      (void *)ic, CDF_TIMER_TYPE_SW);
        dfs->ath_dfs_cac_time = ATH_DFS_WAIT_MS;
        dfs->ath_dfstesttime = ATH_DFS_TEST_RETURN_PERIOD_MS;
#endif
        ATH_DFSQ_LOCK_INIT(dfs);
        STAILQ_INIT(&dfs->dfs_radarq);
        ATH_ARQ_LOCK_INIT(dfs);
        STAILQ_INIT(&dfs->dfs_arq);
        STAILQ_INIT(&(dfs->dfs_eventq));
        ATH_DFSEVENTQ_LOCK_INIT(dfs);
        dfs->events = (struct dfs_event *)os_malloc(NULL,
                                                    sizeof(struct dfs_event) *
                                                    DFS_MAX_EVENTS, GFP_ATOMIC);
        if (dfs->events == NULL) {
                OS_FREE(dfs);
                ic->ic_dfs = NULL;
                DFS_PRINTK("%s: events allocation failed\n", __func__);
                return 1;
        }
        for (i = 0; i < DFS_MAX_EVENTS; i++) {
                STAILQ_INSERT_TAIL(&(dfs->dfs_eventq), &dfs->events[i],
                                   re_list);
        }

        dfs->pulses = (struct dfs_pulseline *)os_malloc(NULL,
                                                        sizeof(struct dfs_pulseline),
                                                        GFP_ATOMIC);
        if (dfs->pulses == NULL) {
                OS_FREE(dfs->events);
                dfs->events = NULL;
                OS_FREE(dfs);
                ic->ic_dfs = NULL;
                DFS_PRINTK("%s: pulse buffer allocation failed\n", __func__);
                return 1;
        }

        dfs->pulses->pl_lastelem = DFS_MAX_PULSE_BUFFER_MASK;

        /* Allocate memory for radar filters */
        for (n = 0; n < DFS_MAX_RADAR_TYPES; n++) {
                dfs->dfs_radarf[n] =
                        (struct dfs_filtertype *)os_malloc(NULL,
                                                           sizeof(struct dfs_filtertype),
                                                           GFP_ATOMIC);
                if (dfs->dfs_radarf[n] == NULL) {
                        DFS_PRINTK("%s: cannot allocate memory for radar filter types\n",
                                   __func__);
                        goto bad1;
                }
                OS_MEMZERO(dfs->dfs_radarf[n], sizeof(struct dfs_filtertype));
        }
        /* Allocate memory for radar table */
        dfs->dfs_radartable =
                (int8_t **) os_malloc(NULL, 256 * sizeof(int8_t *), GFP_ATOMIC);
        if (dfs->dfs_radartable == NULL) {
                DFS_PRINTK("%s: cannot allocate memory for radar table\n",
                           __func__);
                goto bad1;
        }
        for (n = 0; n < 256; n++) {
                dfs->dfs_radartable[n] =
                        os_malloc(NULL, DFS_MAX_RADAR_OVERLAP * sizeof(int8_t),
                                  GFP_ATOMIC);
                if (dfs->dfs_radartable[n] == NULL) {
                        DFS_PRINTK("%s: cannot allocate memory for radar table entry\n",
                                   __func__);
                        goto bad2;
                }
        }

        if (usenol == 0)
                DFS_PRINTK("%s: NOL disabled\n", __func__);
        else if (usenol == 2)
                DFS_PRINTK("%s: NOL disabled; no CSA\n", __func__);

        dfs->dfs_rinfo.rn_use_nol = usenol;

        /* Init the cached extension channel busy for false alarm reduction */
        dfs->dfs_rinfo.ext_chan_busy_ts = ic->ic_get_TSF64(ic);
        dfs->dfs_rinfo.dfs_ext_chan_busy = 0;
        /* Init the Bin5 chirping related data */
        dfs->dfs_rinfo.dfs_bin5_chirp_ts = dfs->dfs_rinfo.ext_chan_busy_ts;
        dfs->dfs_rinfo.dfs_last_bin5_dur = MAX_BIN5_DUR;
        dfs->dfs_b5radars = NULL;

        /*
         * If dfs_init_radar_filters() fails, we can abort here and
         * reconfigure when the first valid channel + radar config
         * is available.
         */
        if (dfs_init_radar_filters(ic, &radar_info)) {
                DFS_PRINTK("%s: radar filter initialization failed\n",
                           __func__);
                return 1;
        }

        dfs->ath_dfs_false_rssi_thres = RSSI_POSSIBLY_FALSE;
        dfs->ath_dfs_peak_mag = SEARCH_FFT_REPORT_PEAK_MAG_THRSH;
        dfs->dfs_phyerr_freq_min = 0x7fffffff;
        dfs->dfs_phyerr_freq_max = 0;
        dfs->dfs_phyerr_queued_count = 0;
        dfs->dfs_phyerr_w53_counter = 0;
        dfs->dfs_pri_multiplier = 2;

        dfs->ath_dfs_nol_timeout = DFS_NOL_TIMEOUT_S;
        return 0;

bad2:
        OS_FREE(dfs->dfs_radartable);
        dfs->dfs_radartable = NULL;
bad1:
        for (n = 0; n < DFS_MAX_RADAR_TYPES; n++) {
                if (dfs->dfs_radarf[n] != NULL) {
                        OS_FREE(dfs->dfs_radarf[n]);
                        dfs->dfs_radarf[n] = NULL;
                }
        }
        if (dfs->pulses) {
                OS_FREE(dfs->pulses);
                dfs->pulses = NULL;
        }
        if (dfs->events) {
                OS_FREE(dfs->events);
                dfs->events = NULL;
        }

        if (ic->ic_dfs) {
                OS_FREE(ic->ic_dfs);
                ic->ic_dfs = NULL;
        }
        return 1;
#undef N
}

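/*
 * dfs_detach() - tear down the per-device DFS state.
 *
 * Cancels the outstanding radar/test timers, drains the radar and event
 * queues, frees the pulse log, radar filters, radar table and NOL state,
 * and finally releases the ath_dfs structure itself.
 */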
void dfs_detach(struct ieee80211com *ic)
{
        struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
        int n, empty;

        if (dfs == NULL) {
                DFS_DPRINTK(dfs, ATH_DEBUG_DFS1, "%s: ic_dfs is NULL\n",
                            __func__);
                return;
        }

        /* Bug 29099: make sure all outstanding timers are cancelled */

        if (dfs->ath_radar_tasksched) {
                OS_CANCEL_TIMER(&dfs->ath_dfs_task_timer);
                dfs->ath_radar_tasksched = 0;
        }

        if (dfs->ath_dfstest) {
                OS_CANCEL_TIMER(&dfs->ath_dfstesttimer);
                dfs->ath_dfstest = 0;
        }
#if 0
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
        if (dfs->ic_dfswait) {
                OS_CANCEL_TIMER(&dfs->ic_dfswaittimer);
                dfs->ath_dfswait = 0;
        }

        OS_CANCEL_TIMER(&dfs->sc_dfs_war_timer);
        if (dfs->dfs_nol != NULL) {
                struct dfs_nolelem *nol, *next;
                nol = dfs->dfs_nol;
                /* Bug 29099 - each NOL element has its own timer, cancel it
                   and free the element */
                while (nol != NULL) {
                        OS_CANCEL_TIMER(&nol->nol_timer);
                        next = nol->nol_next;
                        OS_FREE(nol);
                        nol = next;
                }
                dfs->dfs_nol = NULL;
        }
#endif
#endif
        /* Return radar events to free q */
        dfs_reset_radarq(dfs);
        dfs_reset_alldelaylines(dfs);

        /* Free up pulse log */
        if (dfs->pulses != NULL) {
                OS_FREE(dfs->pulses);
                dfs->pulses = NULL;
        }

        for (n = 0; n < DFS_MAX_RADAR_TYPES; n++) {
                if (dfs->dfs_radarf[n] != NULL) {
                        OS_FREE(dfs->dfs_radarf[n]);
                        dfs->dfs_radarf[n] = NULL;
                }
        }

        if (dfs->dfs_radartable != NULL) {
                for (n = 0; n < 256; n++) {
                        if (dfs->dfs_radartable[n] != NULL) {
                                OS_FREE(dfs->dfs_radartable[n]);
                                dfs->dfs_radartable[n] = NULL;
                        }
                }
                OS_FREE(dfs->dfs_radartable);
                dfs->dfs_radartable = NULL;
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
                dfs->ath_dfs_isdfsregdomain = 0;
#endif
        }

        if (dfs->dfs_b5radars != NULL) {
                OS_FREE(dfs->dfs_b5radars);
                dfs->dfs_b5radars = NULL;
        }

        /*
         * Commenting out since all the ar functions are obsolete and
         * the function definition has been removed as part of dfs_ar.c
         * dfs_reset_ar(dfs);
         */
        ATH_ARQ_LOCK(dfs);
        empty = STAILQ_EMPTY(&(dfs->dfs_arq));
        ATH_ARQ_UNLOCK(dfs);
        if (!empty) {
                /*
                 * Commenting out since all the ar functions are obsolete and
                 * the function definition has been removed as part of dfs_ar.c
                 *
                 * dfs_reset_arq(dfs);
                 */
        }
        if (dfs->events != NULL) {
                OS_FREE(dfs->events);
                dfs->events = NULL;
        }
        dfs_nol_timer_cleanup(dfs);
        OS_FREE(dfs);

        /* XXX? */
        ic->ic_dfs = NULL;
}

/*
 * This is called each time a channel change occurs, to (potentially) disable
 * the radar code.
 */
int dfs_radar_disable(struct ieee80211com *ic)
{
        struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
#ifdef ATH_ENABLE_AR
        dfs->dfs_proc_phyerr &= ~DFS_AR_EN;
#endif
        dfs->dfs_proc_phyerr &= ~DFS_RADAR_EN;
        return 0;
}

/*
 * This is called each time a channel change occurs, to (potentially) enable
 * the radar code.
 */
int dfs_radar_enable(struct ieee80211com *ic,
                     struct ath_dfs_radar_tab_info *radar_info)
{
        int is_ext_ch;
        int is_fastclk = 0;
        int radar_filters_init_status = 0;
        /* uint32_t rfilt; */
        struct ath_dfs *dfs;
        struct dfs_state *rs_pri, *rs_ext;
        struct ieee80211_channel *chan = ic->ic_curchan, *ext_ch = NULL;

        is_ext_ch = IEEE80211_IS_CHAN_11N_HT40(ic->ic_curchan);
        dfs = (struct ath_dfs *)ic->ic_dfs;
        rs_pri = NULL;
        rs_ext = NULL;
#if 0
        int i;
#endif
        if (dfs == NULL) {
                DFS_DPRINTK(dfs, ATH_DEBUG_DFS, "%s: ic_dfs is NULL\n",
                            __func__);

                return -EIO;
        }
        ic->ic_dfs_disable(ic);

        /*
         * Setting the country code might change the DFS domain,
         * so initialize the DFS radar filters.
         */
        radar_filters_init_status = dfs_init_radar_filters(ic, radar_info);

        /*
         * dfs_init_radar_filters() returns 1 on failure and
         * 0 on success.
         */
        if (DFS_STATUS_FAIL == radar_filters_init_status) {
                CDF_TRACE(CDF_MODULE_ID_SAP, CDF_TRACE_LEVEL_ERROR,
                          "%s[%d]: DFS Radar Filters Initialization Failed",
                          __func__, __LINE__);
                return -EIO;
        }

        if ((ic->ic_opmode == IEEE80211_M_HOSTAP
             || ic->ic_opmode == IEEE80211_M_IBSS)) {

                if (IEEE80211_IS_CHAN_DFS(chan)) {

                        uint8_t index_pri, index_ext;
#ifdef ATH_ENABLE_AR
                        dfs->dfs_proc_phyerr |= DFS_AR_EN;
#endif
                        dfs->dfs_proc_phyerr |= DFS_RADAR_EN;

                        if (is_ext_ch) {
                                ext_ch = ieee80211_get_extchan(ic);
                        }
                        dfs_reset_alldelaylines(dfs);

                        rs_pri = dfs_getchanstate(dfs, &index_pri, 0);
                        if (ext_ch) {
                                rs_ext = dfs_getchanstate(dfs, &index_ext, 1);
                        }
                        if (rs_pri != NULL
                            && ((ext_ch == NULL) || (rs_ext != NULL))) {
                                struct ath_dfs_phyerr_param pe;

                                OS_MEMSET(&pe, '\0', sizeof(pe));

                                if (index_pri != dfs->dfs_curchan_radindex)
                                        dfs_reset_alldelaylines(dfs);

                                dfs->dfs_curchan_radindex = (int16_t) index_pri;
                                dfs->dfs_pri_multiplier_ini =
                                        radar_info->dfs_pri_multiplier;

                                if (rs_ext)
                                        dfs->dfs_extchan_radindex =
                                                (int16_t) index_ext;

                                ath_dfs_phyerr_param_copy(&pe,
                                                          &rs_pri->rs_param);
                                DFS_DPRINTK(dfs, ATH_DEBUG_DFS3,
                                            "%s: firpwr=%d, rssi=%d, height=%d, "
                                            "prssi=%d, inband=%d, relpwr=%d, "
                                            "relstep=%d, maxlen=%d\n",
                                            __func__,
                                            pe.pe_firpwr,
                                            pe.pe_rrssi,
                                            pe.pe_height,
                                            pe.pe_prssi,
                                            pe.pe_inband,
                                            pe.pe_relpwr,
                                            pe.pe_relstep, pe.pe_maxlen);

                                ic->ic_dfs_enable(ic, &is_fastclk, &pe);
                                DFS_DPRINTK(dfs, ATH_DEBUG_DFS,
                                            "Enabled radar detection on channel %d\n",
                                            chan->ic_freq);
                                dfs->dur_multiplier =
                                        is_fastclk ? DFS_FAST_CLOCK_MULTIPLIER :
                                        DFS_NO_FAST_CLOCK_MULTIPLIER;
                                DFS_DPRINTK(dfs, ATH_DEBUG_DFS3,
                                            "%s: duration multiplier is %d\n",
                                            __func__, dfs->dur_multiplier);
                        } else
                                DFS_DPRINTK(dfs, ATH_DEBUG_DFS,
                                            "%s: No more radar states left\n",
                                            __func__);
                }
        }

        return DFS_STATUS_SUCCESS;
}

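/*
 * dfs_control() - ioctl-style entry point for DFS configuration.
 *
 * Dispatches on 'id' to get/set detection thresholds, toggle radar
 * detection and FFT processing, adjust the debug level and false-alarm
 * parameters, manipulate the NOL, and (for testing) force a radar event
 * via DFS_BANGRADAR.  Returns 0 on success or -EINVAL on bad arguments.
 */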
int
dfs_control(struct ieee80211com *ic, u_int id,
            void *indata, uint32_t insize, void *outdata, uint32_t *outsize)
{
        int error = 0;
        struct ath_dfs_phyerr_param peout;
        struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
        struct dfs_ioctl_params *dfsparams;
        uint32_t val = 0;
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
        struct dfsreq_nolinfo *nol;
        uint32_t *data = NULL;
#endif /* ATH_DFS_RADAR_DETECTION_ONLY */
        int i;

        if (dfs == NULL) {
                error = -EINVAL;
                DFS_DPRINTK(dfs, ATH_DEBUG_DFS1, "%s DFS is null\n", __func__);
                goto bad;
        }

        switch (id) {
        case DFS_SET_THRESH:
                if (insize < sizeof(struct dfs_ioctl_params) || !indata) {
                        DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
                                    "%s: insize=%d, expected=%zu bytes, indata=%p\n",
                                    __func__, insize,
                                    sizeof(struct dfs_ioctl_params), indata);
                        error = -EINVAL;
                        break;
                }
                dfsparams = (struct dfs_ioctl_params *)indata;
                if (!dfs_set_thresholds
                            (ic, DFS_PARAM_FIRPWR, dfsparams->dfs_firpwr))
                        error = -EINVAL;
                if (!dfs_set_thresholds
                            (ic, DFS_PARAM_RRSSI, dfsparams->dfs_rrssi))
                        error = -EINVAL;
                if (!dfs_set_thresholds
                            (ic, DFS_PARAM_HEIGHT, dfsparams->dfs_height))
                        error = -EINVAL;
                if (!dfs_set_thresholds
                            (ic, DFS_PARAM_PRSSI, dfsparams->dfs_prssi))
                        error = -EINVAL;
                if (!dfs_set_thresholds
                            (ic, DFS_PARAM_INBAND, dfsparams->dfs_inband))
                        error = -EINVAL;
                /* 5413 specific */
                if (!dfs_set_thresholds
                            (ic, DFS_PARAM_RELPWR, dfsparams->dfs_relpwr))
                        error = -EINVAL;
                if (!dfs_set_thresholds
                            (ic, DFS_PARAM_RELSTEP, dfsparams->dfs_relstep))
                        error = -EINVAL;
                if (!dfs_set_thresholds
                            (ic, DFS_PARAM_MAXLEN, dfsparams->dfs_maxlen))
                        error = -EINVAL;
                break;
        case DFS_GET_THRESH:
                if (!outdata || !outsize
                    || *outsize < sizeof(struct dfs_ioctl_params)) {
                        error = -EINVAL;
                        break;
                }
                *outsize = sizeof(struct dfs_ioctl_params);
                dfsparams = (struct dfs_ioctl_params *)outdata;

                /*
                 * Fetch the DFS thresholds using the internal representation.
                 */
                (void)dfs_get_thresholds(ic, &peout);

                /*
                 * Convert them to the dfs IOCTL representation.
                 */
                ath_dfs_dfsparam_to_ioctlparam(&peout, dfsparams);
                break;
        case DFS_RADARDETECTS:
                if (!outdata || !outsize || *outsize < sizeof(uint32_t)) {
                        error = -EINVAL;
                        break;
                }
                *outsize = sizeof(uint32_t);
                *((uint32_t *) outdata) = dfs->ath_dfs_stats.num_radar_detects;
                break;
        case DFS_DISABLE_DETECT:
                dfs->dfs_proc_phyerr &= ~DFS_RADAR_EN;
                dfs->ic->ic_dfs_state.ignore_dfs = 1;
                DFS_PRINTK("%s disable detects, ignore_dfs %d\n",
                           __func__, dfs->ic->ic_dfs_state.ignore_dfs);
                break;
        case DFS_ENABLE_DETECT:
                dfs->dfs_proc_phyerr |= DFS_RADAR_EN;
                dfs->ic->ic_dfs_state.ignore_dfs = 0;
                DFS_PRINTK("%s enable detects, ignore_dfs %d\n",
                           __func__, dfs->ic->ic_dfs_state.ignore_dfs);
                break;
        case DFS_DISABLE_FFT:
                /* UMACDFS: TODO: val = ath_hal_dfs_config_fft(sc->sc_ah, false); */
                DFS_PRINTK("%s TODO disable FFT val=0x%x\n", __func__, val);
                break;
        case DFS_ENABLE_FFT:
                /* UMACDFS TODO: val = ath_hal_dfs_config_fft(sc->sc_ah, true); */
                DFS_PRINTK("%s TODO enable FFT val=0x%x\n", __func__, val);
                break;
        case DFS_SET_DEBUG_LEVEL:
                if (insize < sizeof(uint32_t) || !indata) {
                        error = -EINVAL;
                        break;
                }
                dfs->dfs_debug_mask = *(uint32_t *) indata;
                DFS_PRINTK("%s debug level now = 0x%x\n",
                           __func__, dfs->dfs_debug_mask);
                if (dfs->dfs_debug_mask & ATH_DEBUG_DFS3) {
                        /* Enable debug Radar Event */
                        dfs->dfs_event_log_on = 1;
                } else {
                        dfs->dfs_event_log_on = 0;
                }
                break;
        case DFS_SET_FALSE_RSSI_THRES:
                if (insize < sizeof(uint32_t) || !indata) {
                        error = -EINVAL;
                        break;
                }
                dfs->ath_dfs_false_rssi_thres = *(uint32_t *) indata;
                DFS_PRINTK("%s false RSSI threshold now = 0x%x\n",
                           __func__, dfs->ath_dfs_false_rssi_thres);
                break;
        case DFS_SET_PEAK_MAG:
                if (insize < sizeof(uint32_t) || !indata) {
                        error = -EINVAL;
                        break;
                }
                dfs->ath_dfs_peak_mag = *(uint32_t *) indata;
                DFS_PRINTK("%s peak_mag now = 0x%x\n",
                           __func__, dfs->ath_dfs_peak_mag);
                break;
        case DFS_IGNORE_CAC:
                if (insize < sizeof(uint32_t) || !indata) {
                        error = -EINVAL;
                        break;
                }
                if (*(uint32_t *) indata) {
                        dfs->ic->ic_dfs_state.ignore_cac = 1;
                } else {
                        dfs->ic->ic_dfs_state.ignore_cac = 0;
                }
                DFS_PRINTK("%s ignore cac = 0x%x\n",
                           __func__, dfs->ic->ic_dfs_state.ignore_cac);
                break;
        case DFS_SET_NOL_TIMEOUT:
                if (insize < sizeof(uint32_t) || !indata) {
                        error = -EINVAL;
                        break;
                }
                if (*(int *)indata) {
                        dfs->ath_dfs_nol_timeout = *(int *)indata;
                } else {
                        dfs->ath_dfs_nol_timeout = DFS_NOL_TIMEOUT_S;
                }
                DFS_PRINTK("%s nol timeout = %d sec\n",
                           __func__, dfs->ath_dfs_nol_timeout);
                break;
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
        case DFS_MUTE_TIME:
                if (insize < sizeof(uint32_t) || !indata) {
                        error = -EINVAL;
                        break;
                }
                data = (uint32_t *) indata;
                dfs->ath_dfstesttime = *data;
                dfs->ath_dfstesttime *= (1000); /* convert sec into ms */
                break;
        case DFS_GET_USENOL:
                if (!outdata || !outsize || *outsize < sizeof(uint32_t)) {
                        error = -EINVAL;
                        break;
                }
                *outsize = sizeof(uint32_t);
                *((uint32_t *) outdata) = dfs->dfs_rinfo.rn_use_nol;

                for (i = 0;
                     (i < DFS_EVENT_LOG_SIZE) && (i < dfs->dfs_event_log_count);
                     i++) {
                        /*
                         * DFS_DPRINTK(sc, ATH_DEBUG_DFS,
                         *     "ts=%llu diff_ts=%u rssi=%u dur=%u\n",
                         *     dfs->radar_log[i].ts, dfs->radar_log[i].diff_ts,
                         *     dfs->radar_log[i].rssi, dfs->radar_log[i].dur);
                         */
                }
                dfs->dfs_event_log_count = 0;
                dfs->dfs_phyerr_count = 0;
                dfs->dfs_phyerr_reject_count = 0;
                dfs->dfs_phyerr_queued_count = 0;
                dfs->dfs_phyerr_freq_min = 0x7fffffff;
                dfs->dfs_phyerr_freq_max = 0;
                break;
        case DFS_SET_USENOL:
                if (insize < sizeof(uint32_t) || !indata) {
                        error = -EINVAL;
                        break;
                }
                dfs->dfs_rinfo.rn_use_nol = *(uint32_t *) indata;
                /* iwpriv markdfs in linux can do the same thing... */
                break;
        case DFS_GET_NOL:
                if (!outdata || !outsize
                    || *outsize < sizeof(struct dfsreq_nolinfo)) {
                        error = -EINVAL;
                        break;
                }
                *outsize = sizeof(struct dfsreq_nolinfo);
                nol = (struct dfsreq_nolinfo *)outdata;
                dfs_get_nol(dfs, (struct dfsreq_nolelem *)nol->dfs_nol,
                            &nol->ic_nchans);
                dfs_print_nol(dfs);
                break;
        case DFS_SET_NOL:
                if (insize < sizeof(struct dfsreq_nolinfo) || !indata) {
                        error = -EINVAL;
                        break;
                }
                nol = (struct dfsreq_nolinfo *)indata;
                dfs_set_nol(dfs, (struct dfsreq_nolelem *)nol->dfs_nol,
                            nol->ic_nchans);
                break;

        case DFS_SHOW_NOL:
                dfs_print_nol(dfs);
                break;
        case DFS_BANGRADAR:
#if 0 /* MERGE_TBD */
                if (sc->sc_nostabeacons) {
                        printk("No radar detection Enabled\n");
                        break;
                }
#endif
                dfs->dfs_bangradar = 1;
                dfs->ath_radar_tasksched = 1;
                OS_SET_TIMER(&dfs->ath_dfs_task_timer, 0);
                break;
#endif /* ATH_DFS_RADAR_DETECTION_ONLY */
        default:
                error = -EINVAL;
        }
bad:
        return error;
}

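/*
 * dfs_set_thresholds() - update a single detection threshold.
 *
 * Writes the new value into the radar state for the current channel and
 * pushes the merged parameter set down to the driver/HAL via
 * ic_dfs_enable().  Returns 1 on success, or 0 if the DFS state or the
 * current channel index is invalid.
 */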
int
dfs_set_thresholds(struct ieee80211com *ic, const uint32_t threshtype,
                   const uint32_t value)
{
        struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
        int16_t chanindex;
        struct dfs_state *rs;
        struct ath_dfs_phyerr_param pe;
        int is_fastclk = 0;     /* XXX throw-away */

        if (dfs == NULL) {
                DFS_DPRINTK(dfs, ATH_DEBUG_DFS1, "%s: ic_dfs is NULL\n",
                            __func__);
                return 0;
        }

        chanindex = dfs->dfs_curchan_radindex;
        if ((chanindex < 0) || (chanindex >= DFS_NUM_RADAR_STATES)) {
                DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
                            "%s: chanindex = %d, DFS_NUM_RADAR_STATES=%d\n",
                            __func__, chanindex, DFS_NUM_RADAR_STATES);
                return 0;
        }

        DFS_DPRINTK(dfs, ATH_DEBUG_DFS,
                    "%s: threshtype=%d, value=%d\n", __func__, threshtype,
                    value);

        ath_dfs_phyerr_init_noval(&pe);

        rs = &(dfs->dfs_radar[chanindex]);
        switch (threshtype) {
        case DFS_PARAM_FIRPWR:
                rs->rs_param.pe_firpwr = (int32_t) value;
                pe.pe_firpwr = value;
                break;
        case DFS_PARAM_RRSSI:
                rs->rs_param.pe_rrssi = value;
                pe.pe_rrssi = value;
                break;
        case DFS_PARAM_HEIGHT:
                rs->rs_param.pe_height = value;
                pe.pe_height = value;
                break;
        case DFS_PARAM_PRSSI:
                rs->rs_param.pe_prssi = value;
                pe.pe_prssi = value;
                break;
        case DFS_PARAM_INBAND:
                rs->rs_param.pe_inband = value;
                pe.pe_inband = value;
                break;
        /* 5413 specific */
        case DFS_PARAM_RELPWR:
                rs->rs_param.pe_relpwr = value;
                pe.pe_relpwr = value;
                break;
        case DFS_PARAM_RELSTEP:
                rs->rs_param.pe_relstep = value;
                pe.pe_relstep = value;
                break;
        case DFS_PARAM_MAXLEN:
                rs->rs_param.pe_maxlen = value;
                pe.pe_maxlen = value;
                break;
        default:
                DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
                            "%s: unknown threshtype (%d)\n",
                            __func__, threshtype);
                break;
        }

        /*
         * The driver layer dfs_enable routine is tasked with translating
         * values from the global format to the per-device (HAL, offload)
         * format.
         */
        ic->ic_dfs_enable(ic, &is_fastclk, &pe);
        return 1;
}

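/*
 * dfs_get_thresholds() - fetch the current detection thresholds from the
 * driver/HAL into the caller-supplied ath_dfs_phyerr_param structure.
 */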
int
dfs_get_thresholds(struct ieee80211com *ic, struct ath_dfs_phyerr_param *param)
{
        /* UMACDFS: TODO: ath_hal_getdfsthresh(sc->sc_ah, param); */

        OS_MEMZERO(param, sizeof(*param));

        (void)ic->ic_dfs_get_thresholds(ic, param);

        return 1;
}

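/*
 * dfs_usenol() - report the configured NOL behaviour (usenol 0, 1 or 2),
 * or 0 if DFS is not attached.
 */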
uint16_t dfs_usenol(struct ieee80211com *ic)
{
        struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;

        return dfs ? (uint16_t) dfs->dfs_rinfo.rn_use_nol : 0;
}

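/*
 * dfs_isdfsregdomain() - return the DFS regulatory domain recorded in the
 * DFS state (dfsdomain), or 0 if DFS is not attached.
 */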
uint16_t dfs_isdfsregdomain(struct ieee80211com *ic)
{
        struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;

        return dfs ? dfs->dfsdomain : 0;
}

#endif /* ATH_SUPPORT_DFS */