blob: 85266dc37eb04e3d977c10735fdc8f98e216e42e [file] [log] [blame]
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001/*
Anurag Chouhan6d760662016-02-20 16:05:43 +05302 * Copyright (c) 2002-2016 The Linux Foundation. All rights reserved.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28/*===========================================================================
29
Anurag Chouhan6d760662016-02-20 16:05:43 +053030 dfs.c
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080031
32 OVERVIEW:
33
34 Source code borrowed from QCA_MAIN DFS module
35
36 DEPENDENCIES:
37
38 Are listed for each API below.
39
40 ===========================================================================*/
41
42/*===========================================================================
43
Anurag Chouhan6d760662016-02-20 16:05:43 +053044 EDIT HISTORY FOR FILE
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080045
46 This section contains comments describing changes made to the module.
47 Notice that changes are listed in reverse chronological order.
48
49 when who what, where, why
50 ---------- --- --------------------------------------------------------
51
52 ===========================================================================*/
53
54#include <osdep.h>
55
56#ifndef ATH_SUPPORT_DFS
57#define ATH_SUPPORT_DFS 1
58
59/* #include "if_athioctl.h" */
60/* #include "if_athvar.h" */
61#include "dfs_ioctl.h"
62#include "dfs.h"
63
64int domainoverride = DFS_UNINIT_DOMAIN;
65
66/*
67** channel switch announcement (CSA)
68** usenol=1 (default) make CSA and switch to a new channel on radar detect
69** usenol=0, make CSA with next channel same as current on radar detect
70** usenol=2, no CSA and stay on the same channel on radar detect
71**/
72
73int usenol = 1;
74uint32_t dfs_debug_level = ATH_DEBUG_DFS;
75
76#if 0 /* the code to call this is curently commented-out below */
/*
 * dfs_channel_mark_radar() - mark a channel as having interference detected.
 * @dfs:  per-radio DFS state.
 * @chan: channel the radar event was seen on (not read here; kept so the
 *        ath_dev layer always receives the primary channel back).
 *
 * Adds the interference marker to both the primary and the extension
 * channel and places them on the NOL.
 *
 * NOTE(review): this function is compiled out (#if 0 around this span) and
 * its only call site in dfs_task() is commented out.
 *
 * XXX TODO: make the NOL and channel interference logic a bit smarter
 * so only the channel with the radar event is marked, rather than
 * both the primary and extension.
 */
static void
dfs_channel_mark_radar(struct ath_dfs *dfs, struct dfs_ieee80211_channel *chan)
{
	struct ieee80211_channel_list chan_info;
	int i;

	/* chan->ic_flagext |= CHANNEL_INTERFERENCE; */

	/*
	 * If radar is detected in 40MHz mode, add both the primary and the
	 * extension channels to the NOL. chan is the channel data we return
	 * to the ath_dev layer which passes it on to the 80211 layer.
	 * As we want the AP to change channels and send out a CSA,
	 * we always pass back the primary channel data to the ath_dev layer.
	 */
	if ((dfs->dfs_rinfo.rn_use_nol == 1) &&
	    (dfs->ic->ic_opmode == IEEE80211_M_HOSTAP ||
	     dfs->ic->ic_opmode == IEEE80211_M_IBSS)) {
		chan_info.cl_nchans = 0;
		/* Fetch primary + extension channel pointers from the umac. */
		dfs->ic->ic_get_ext_chan_info(dfs->ic, &chan_info);

		for (i = 0; i < chan_info.cl_nchans; i++) {
			if (chan_info.cl_channels[i] == NULL) {
				DFS_PRINTK("%s: NULL channel\n", __func__);
			} else {
				chan_info.cl_channels[i]->ic_flagext |=
					CHANNEL_INTERFERENCE;
				dfs_nol_addchan(dfs, chan_info.cl_channels[i],
						dfs->ath_dfs_nol_timeout);
			}
		}

		/*
		 * Update the umac/driver channels with the new NOL information.
		 */
		dfs_nol_update(dfs);
	}
}
125#endif /* #if 0 */
126
/*
 * dfs_task() - radar-event processing timer handler.
 *
 * Scheduled via dfs->ath_dfs_task_timer when the PHY-error path queues
 * radar pulses.  Processes the queued events for the current channel and,
 * when a radar pattern is confirmed: with usenol==1 notifies the umac so
 * it can run the CSA/channel-change machinery; with usenol==0 (test mode)
 * arms the test timer that switches back to the original channel.
 * Clears ath_radar_tasksched on exit so the task can be scheduled again.
 */
static os_timer_func(dfs_task)
{
	struct ieee80211com *ic;
	struct ath_dfs *dfs = NULL;

	OS_GET_TIMER_ARG(ic, struct ieee80211com *);
	dfs = (struct ath_dfs *)ic->ic_dfs;
	/*
	 * XXX no locking?!
	 */
	if (dfs_process_radarevent(dfs, ic->ic_curchan)) {
#ifndef ATH_DFS_RADAR_DETECTION_ONLY

		/*
		 * This marks the channel (and the extension channel, if HT40) as
		 * having seen a radar event. It marks CHAN_INTERFERENCE and
		 * will add it to the local NOL implementation.
		 *
		 * This is only done for 'usenol=1', as the other two modes
		 * don't do radar notification or CAC/CSA/NOL; it just notes
		 * there was a radar.
		 */

		if (dfs->dfs_rinfo.rn_use_nol == 1) {
			/* dfs_channel_mark_radar(dfs, ic->ic_curchan); */
		}
#endif /* ATH_DFS_RADAR_DETECTION_ONLY */

		/*
		 * This calls into the umac DFS code, which sets the umac related
		 * radar flags and begins the channel change machinery.
		 *
		 * XXX TODO: the umac NOL code isn't used, but IEEE80211_CHAN_RADAR
		 * still gets set. Since the umac NOL code isn't used, that flag
		 * is never cleared. This needs to be fixed. See EV 105776.
		 */
		if (dfs->dfs_rinfo.rn_use_nol == 1) {
			ic->ic_dfs_notify_radar(ic, ic->ic_curchan);
		} else if (dfs->dfs_rinfo.rn_use_nol == 0) {
			/*
			 * For the test mode, don't do a CSA here; but setup the
			 * test timer so we get a CSA _back_ to the original channel.
			 */
			OS_CANCEL_TIMER(&dfs->ath_dfstesttimer);
			dfs->ath_dfstest = 1;
			/* chan_lock guards ic_curchan against concurrent change */
			qdf_spin_lock_bh(&ic->chan_lock);
			dfs->ath_dfstest_ieeechan = ic->ic_curchan->ic_ieee;
			qdf_spin_unlock_bh(&ic->chan_lock);
			dfs->ath_dfstesttime = 1;       /* 1ms */
			OS_SET_TIMER(&dfs->ath_dfstesttimer,
				     dfs->ath_dfstesttime);
		}
	}
	/* Permit the PHY-error path to schedule this task again. */
	dfs->ath_radar_tasksched = 0;
}
182
183static os_timer_func(dfs_testtimer_task)
184{
185 struct ieee80211com *ic;
186 struct ath_dfs *dfs = NULL;
187
188 OS_GET_TIMER_ARG(ic, struct ieee80211com *);
189 dfs = (struct ath_dfs *)ic->ic_dfs;
190
191 /* XXX no locking? */
192 dfs->ath_dfstest = 0;
193
194 /*
195 * Flip the channel back to the original channel.
196 * Make sure this is done properly with a CSA.
197 */
198 DFS_PRINTK("%s: go back to channel %d\n",
199 __func__, dfs->ath_dfstest_ieeechan);
200
201 /*
202 * XXX The mere existence of this method indirection
203 * to a umac function means this code belongs in
204 * the driver, _not_ here. Please fix this!
205 */
206 ic->ic_start_csa(ic, dfs->ath_dfstest_ieeechan);
207}
208
209static int dfs_get_debug_info(struct ieee80211com *ic, int type, void *data)
210{
211 struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
212 if (data) {
213 *(uint32_t *) data = dfs->dfs_proc_phyerr;
214 }
215 return (int)dfs->dfs_proc_phyerr;
216}
217
218int dfs_attach(struct ieee80211com *ic)
219{
220 int i, n;
221 struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
222 struct ath_dfs_radar_tab_info radar_info;
223
224 if (dfs != NULL) {
225 /*DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
226 "%s: ic_dfs was not NULL\n",
227 __func__);
228 */
229 return 1;
230 }
231
Anurag Chouhan2ed1fce2016-02-22 15:07:01 +0530232 dfs = (struct ath_dfs *)os_malloc(NULL, sizeof(struct ath_dfs),
233 GFP_ATOMIC);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800234
235 if (dfs == NULL) {
236 /*DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
237 "%s: ath_dfs allocation failed\n", __func__); */
238 return 1;
239 }
240
241 OS_MEMZERO(dfs, sizeof(struct ath_dfs));
242
243 ic->ic_dfs = (void *)dfs;
244
245 dfs->ic = ic;
246
247 ic->ic_dfs_debug = dfs_get_debug_info;
248#ifndef ATH_DFS_RADAR_DETECTION_ONLY
249 dfs->dfs_nol = NULL;
250#endif
251
252 /*
253 * Zero out radar_info. It's possible that the attach function won't
254 * fetch an initial regulatory configuration; you really do want to
255 * ensure that the contents indicates there aren't any filters.
256 */
257 OS_MEMZERO(&radar_info, sizeof(radar_info));
258 ic->ic_dfs_attach(ic, &dfs->dfs_caps, &radar_info);
259 dfs_clear_stats(ic);
260 dfs->dfs_event_log_on = 0;
261 OS_INIT_TIMER(NULL, &(dfs->ath_dfs_task_timer), dfs_task, (void *)(ic),
Anurag Chouhan6d760662016-02-20 16:05:43 +0530262 QDF_TIMER_TYPE_SW);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800263#ifndef ATH_DFS_RADAR_DETECTION_ONLY
264 OS_INIT_TIMER(NULL, &(dfs->ath_dfstesttimer), dfs_testtimer_task,
Anurag Chouhan6d760662016-02-20 16:05:43 +0530265 (void *)ic, QDF_TIMER_TYPE_SW);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800266 dfs->ath_dfs_cac_time = ATH_DFS_WAIT_MS;
267 dfs->ath_dfstesttime = ATH_DFS_TEST_RETURN_PERIOD_MS;
268#endif
269 ATH_DFSQ_LOCK_INIT(dfs);
270 STAILQ_INIT(&dfs->dfs_radarq);
271 ATH_ARQ_LOCK_INIT(dfs);
272 STAILQ_INIT(&dfs->dfs_arq);
273 STAILQ_INIT(&(dfs->dfs_eventq));
274 ATH_DFSEVENTQ_LOCK_INIT(dfs);
275 dfs->events = (struct dfs_event *)os_malloc(NULL,
276 sizeof(struct dfs_event) *
277 DFS_MAX_EVENTS, GFP_ATOMIC);
278 if (dfs->events == NULL) {
279 OS_FREE(dfs);
280 ic->ic_dfs = NULL;
281 DFS_PRINTK("%s: events allocation failed\n", __func__);
282 return 1;
283 }
284 for (i = 0; i < DFS_MAX_EVENTS; i++) {
285 STAILQ_INSERT_TAIL(&(dfs->dfs_eventq), &dfs->events[i],
286 re_list);
287 }
288
289 dfs->pulses =
290 (struct dfs_pulseline *)os_malloc(NULL,
291 sizeof(struct dfs_pulseline),
292 GFP_ATOMIC);
293 if (dfs->pulses == NULL) {
294 OS_FREE(dfs->events);
295 dfs->events = NULL;
296 OS_FREE(dfs);
297 ic->ic_dfs = NULL;
298 DFS_PRINTK("%s: pulse buffer allocation failed\n", __func__);
299 return 1;
300 }
301
Rakesh Sunkif7f82e52015-12-14 15:09:40 -0800302 /*
303 * If the chip supports DFS-3 then allocate
304 * memory for pulses for extension segment.
305 */
306 if (ic->dfs_hw_bd_id != DFS_HWBD_QCA6174) {
307 dfs->pulses_ext_seg = (struct dfs_pulseline *)
308 os_malloc(NULL,
309 sizeof(struct dfs_pulseline),
310 GFP_ATOMIC);
311 if (dfs->pulses_ext_seg == NULL) {
312 OS_FREE(dfs->events);
313 dfs->events = NULL;
314 OS_FREE(dfs);
315 ic->ic_dfs = NULL;
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +0530316 QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_ERROR,
Rakesh Sunkif7f82e52015-12-14 15:09:40 -0800317 "%s[%d]: pulse buffer allocation failed",
318 __func__, __LINE__);
319 return 1;
320 }
321 dfs->pulses_ext_seg->pl_lastelem = DFS_MAX_PULSE_BUFFER_MASK;
322 }
323
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800324 dfs->pulses->pl_lastelem = DFS_MAX_PULSE_BUFFER_MASK;
325
326 /* Allocate memory for radar filters */
327 for (n = 0; n < DFS_MAX_RADAR_TYPES; n++) {
328 dfs->dfs_radarf[n] =
329 (struct dfs_filtertype *)os_malloc(NULL,
330 sizeof(struct
331 dfs_filtertype),
332 GFP_ATOMIC);
333 if (dfs->dfs_radarf[n] == NULL) {
334 DFS_PRINTK
335 ("%s: cannot allocate memory for radar filter types\n",
336 __func__);
337 goto bad1;
338 }
339 OS_MEMZERO(dfs->dfs_radarf[n], sizeof(struct dfs_filtertype));
340 }
341 /* Allocate memory for radar table */
342 dfs->dfs_radartable =
Anurag Chouhan6d760662016-02-20 16:05:43 +0530343 (int8_t **) os_malloc(NULL, 256 * sizeof(int8_t *), GFP_ATOMIC);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800344 if (dfs->dfs_radartable == NULL) {
345 DFS_PRINTK("%s: cannot allocate memory for radar table\n",
346 __func__);
347 goto bad1;
348 }
349 for (n = 0; n < 256; n++) {
350 dfs->dfs_radartable[n] =
351 os_malloc(NULL, DFS_MAX_RADAR_OVERLAP * sizeof(int8_t),
352 GFP_ATOMIC);
353 if (dfs->dfs_radartable[n] == NULL) {
354 DFS_PRINTK
355 ("%s: cannot allocate memory for radar table entry\n",
356 __func__);
357 goto bad2;
358 }
359 }
360
361 if (usenol == 0)
362 DFS_PRINTK("%s: NOL disabled\n", __func__);
363 else if (usenol == 2)
364 DFS_PRINTK("%s: NOL disabled; no CSA\n", __func__);
365
366 dfs->dfs_rinfo.rn_use_nol = usenol;
367
368 /* Init the cached extension channel busy for false alarm reduction */
369 dfs->dfs_rinfo.ext_chan_busy_ts = ic->ic_get_TSF64(ic);
370 dfs->dfs_rinfo.dfs_ext_chan_busy = 0;
371 /* Init the Bin5 chirping related data */
372 dfs->dfs_rinfo.dfs_bin5_chirp_ts = dfs->dfs_rinfo.ext_chan_busy_ts;
373 dfs->dfs_rinfo.dfs_last_bin5_dur = MAX_BIN5_DUR;
374 dfs->dfs_b5radars = NULL;
Rakesh Sunkif7f82e52015-12-14 15:09:40 -0800375 if (ic->dfs_hw_bd_id != DFS_HWBD_QCA6174) {
376 dfs->dfs_rinfo.dfs_last_bin5_dur_ext_seg = MAX_BIN5_DUR;
377 dfs->dfs_b5radars_ext_seg = NULL;
378 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800379
380 /*
381 * If dfs_init_radar_filters() fails, we can abort here and
382 * reconfigure when the first valid channel + radar config
383 * is available.
384 */
385 if (dfs_init_radar_filters(ic, &radar_info)) {
386 DFS_PRINTK(" %s: Radar Filter Intialization Failed \n",
387 __func__);
388 return 1;
389 }
390
391 dfs->ath_dfs_false_rssi_thres = RSSI_POSSIBLY_FALSE;
392 dfs->ath_dfs_peak_mag = SEARCH_FFT_REPORT_PEAK_MAG_THRSH;
393 dfs->dfs_phyerr_freq_min = 0x7fffffff;
394 dfs->dfs_phyerr_freq_max = 0;
395 dfs->dfs_phyerr_queued_count = 0;
396 dfs->dfs_phyerr_w53_counter = 0;
397 dfs->dfs_pri_multiplier = 2;
398
399 dfs->ath_dfs_nol_timeout = DFS_NOL_TIMEOUT_S;
400 return 0;
401
402bad2:
403 OS_FREE(dfs->dfs_radartable);
404 dfs->dfs_radartable = NULL;
405bad1:
406 for (n = 0; n < DFS_MAX_RADAR_TYPES; n++) {
407 if (dfs->dfs_radarf[n] != NULL) {
408 OS_FREE(dfs->dfs_radarf[n]);
409 dfs->dfs_radarf[n] = NULL;
410 }
411 }
412 if (dfs->pulses) {
413 OS_FREE(dfs->pulses);
414 dfs->pulses = NULL;
415 }
Rakesh Sunkif7f82e52015-12-14 15:09:40 -0800416 if (dfs->pulses_ext_seg &&
417 ic->dfs_hw_bd_id != DFS_HWBD_QCA6174) {
418 OS_FREE(dfs->pulses_ext_seg);
419 dfs->pulses_ext_seg = NULL;
420 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800421 if (dfs->events) {
422 OS_FREE(dfs->events);
423 dfs->events = NULL;
424 }
425
426 if (ic->ic_dfs) {
427 OS_FREE(ic->ic_dfs);
428 ic->ic_dfs = NULL;
429 }
430 return 1;
431#undef N
432}
433
/*
 * dfs_detach() - tear down and free the per-radio DFS state.
 * @ic: ieee80211com instance whose DFS context is released.
 *
 * Cancels any outstanding radar/test timers, drains the radar and event
 * queues, resets the delay lines, and frees every allocation made in
 * dfs_attach().  Safe to call when no DFS context is attached (logs and
 * returns).  On exit ic->ic_dfs is NULL.
 */
void dfs_detach(struct ieee80211com *ic)
{
	struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
	int n, empty;

	if (dfs == NULL) {
		DFS_DPRINTK(dfs, ATH_DEBUG_DFS1, "%s: ic_dfs is NULL\n",
			    __func__);
		return;
	}

	/* Bug 29099 make sure all outstanding timers are cancelled */

	if (dfs->ath_radar_tasksched) {
		OS_CANCEL_TIMER(&dfs->ath_dfs_task_timer);
		dfs->ath_radar_tasksched = 0;
	}

	if (dfs->ath_dfstest) {
		OS_CANCEL_TIMER(&dfs->ath_dfstesttimer);
		dfs->ath_dfstest = 0;
	}
#if 0
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
	if (dfs->ic_dfswait) {
		OS_CANCEL_TIMER(&dfs->ic_dfswaittimer);
		dfs->ath_dfswait = 0;
	}

	OS_CANCEL_TIMER(&dfs->sc_dfs_war_timer);
	if (dfs->dfs_nol != NULL) {
		struct dfs_nolelem *nol, *next;
		nol = dfs->dfs_nol;
		/* Bug 29099 - each NOL element has its own timer, cancel it and
		   free the element */
		while (nol != NULL) {
			OS_CANCEL_TIMER(&nol->nol_timer);
			next = nol->nol_next;
			OS_FREE(nol);
			nol = next;
		}
		dfs->dfs_nol = NULL;
	}
#endif
#endif

	/* Return radar events to free q */
	dfs_reset_radarq(dfs);
	dfs_reset_alldelaylines(dfs, DFS_80P80_SEG0);
	/* Extension-segment state exists only on DFS-3 capable boards. */
	if (ic->dfs_hw_bd_id != DFS_HWBD_QCA6174)
		dfs_reset_alldelaylines(dfs, DFS_80P80_SEG1);

	/* Free up pulse log */
	if (dfs->pulses != NULL) {
		OS_FREE(dfs->pulses);
		dfs->pulses = NULL;
	}

	if (dfs->pulses_ext_seg != NULL &&
	    ic->dfs_hw_bd_id != DFS_HWBD_QCA6174) {
		OS_FREE(dfs->pulses_ext_seg);
		dfs->pulses_ext_seg = NULL;
	}

	/* Free radar filter types. */
	for (n = 0; n < DFS_MAX_RADAR_TYPES; n++) {
		if (dfs->dfs_radarf[n] != NULL) {
			OS_FREE(dfs->dfs_radarf[n]);
			dfs->dfs_radarf[n] = NULL;
		}
	}

	/* Free the 256-row radar lookup table, rows first. */
	if (dfs->dfs_radartable != NULL) {
		for (n = 0; n < 256; n++) {
			if (dfs->dfs_radartable[n] != NULL) {
				OS_FREE(dfs->dfs_radartable[n]);
				dfs->dfs_radartable[n] = NULL;
			}
		}
		OS_FREE(dfs->dfs_radartable);
		dfs->dfs_radartable = NULL;
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
		dfs->ath_dfs_isdfsregdomain = 0;
#endif
	}

	if (dfs->dfs_b5radars != NULL) {
		OS_FREE(dfs->dfs_b5radars);
		dfs->dfs_b5radars = NULL;
	}
	if (dfs->dfs_b5radars_ext_seg != NULL &&
	    ic->dfs_hw_bd_id != DFS_HWBD_QCA6174) {
		OS_FREE(dfs->dfs_b5radars_ext_seg);
		dfs->dfs_b5radars_ext_seg = NULL;
	}

/* Commenting out since all the ar functions are obsolete and
 * the function definition has been removed as part of dfs_ar.c
 * dfs_reset_ar(dfs);
 */
	ATH_ARQ_LOCK(dfs);
	empty = STAILQ_EMPTY(&(dfs->dfs_arq));
	ATH_ARQ_UNLOCK(dfs);
	if (!empty) {
/*
 * Commenting out since all the ar functions are obsolete and
 * the function definition has been removed as part of dfs_ar.c
 *
 * dfs_reset_arq(dfs);
 */
	}
	if (dfs->events != NULL) {
		OS_FREE(dfs->events);
		dfs->events = NULL;
	}
	dfs_nol_timer_cleanup(dfs);
	OS_FREE(dfs);

	/* XXX? */
	ic->ic_dfs = NULL;
}
554
/*
 * This is called each time a channel change occurs, to (potentially) disable
 * the radar code.
 */
559int dfs_radar_disable(struct ieee80211com *ic)
560{
561 struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
562#ifdef ATH_ENABLE_AR
563 dfs->dfs_proc_phyerr &= ~DFS_AR_EN;
564#endif
565 dfs->dfs_proc_phyerr &= ~DFS_RADAR_EN;
566 return 0;
567}
568
569/*
570 * This is called each time a channel change occurs, to (potentially) enable
571 * the radar code.
572 */
/*
 * dfs_radar_enable() - (re)arm radar detection after a channel change.
 * @ic:         ieee80211com instance.
 * @radar_info: regulatory radar filter configuration to (re)initialize with.
 *
 * Re-initializes the radar filters (a country-code change may have moved
 * us to a different DFS domain) and, when operating as AP/IBSS on a DFS
 * channel, resets the delay lines, looks up the per-channel radar state,
 * and pushes the PHY-error thresholds down to the driver via
 * ic_dfs_enable().
 *
 * Return: DFS_STATUS_SUCCESS on success, -EIO when the DFS context is
 * missing or filter initialization fails.
 */
int dfs_radar_enable(struct ieee80211com *ic,
		     struct ath_dfs_radar_tab_info *radar_info)
{
	int is_ext_ch;
	int is_fastclk = 0;
	int radar_filters_init_status = 0;
	/* uint32_t rfilt; */
	struct ath_dfs *dfs;
	struct dfs_state *rs_pri, *rs_ext;
	struct dfs_ieee80211_channel *chan = ic->ic_curchan, *ext_ch = NULL;
	is_ext_ch = IEEE80211_IS_CHAN_11N_HT40(ic->ic_curchan);
	dfs = (struct ath_dfs *)ic->ic_dfs;
	rs_pri = NULL;
	rs_ext = NULL;
#if 0
	int i;
#endif
	if (dfs == NULL) {
		DFS_DPRINTK(dfs, ATH_DEBUG_DFS, "%s: ic_dfs is NULL\n",
			    __func__);

		return -EIO;
	}
	/* Quiesce detection while filters/thresholds are reprogrammed. */
	ic->ic_dfs_disable(ic);

	/*
	 * Setting country code might change the DFS domain
	 * so initialize the DFS Radar filters
	 */
	radar_filters_init_status = dfs_init_radar_filters(ic, radar_info);

	/*
	 * dfs_init_radar_filters() returns 1 on failure and
	 * 0 on success.
	 */
	if (DFS_STATUS_FAIL == radar_filters_init_status) {
		QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_ERROR,
			  "%s[%d]: DFS Radar Filters Initialization Failed",
			  __func__, __LINE__);
		return -EIO;
	}

	if ((ic->ic_opmode == IEEE80211_M_HOSTAP
	     || ic->ic_opmode == IEEE80211_M_IBSS)) {

		if (IEEE80211_IS_CHAN_DFS(chan)) {

			uint8_t index_pri, index_ext;
#ifdef ATH_ENABLE_AR
			dfs->dfs_proc_phyerr |= DFS_AR_EN;
#endif
			dfs->dfs_proc_phyerr |= DFS_RADAR_EN;

			if (is_ext_ch) {
				ext_ch = ieee80211_get_extchan(ic);
			}
			dfs_reset_alldelaylines(dfs, DFS_80P80_SEG0);
			/*
			 * Extension segment delaylines will be
			 * enabled only when SAP operates in 80p80
			 * and both the channels are DFS.
			 */
			if (chan->ic_80p80_both_dfs)
				dfs_reset_alldelaylines(dfs, DFS_80P80_SEG1);

			rs_pri = dfs_getchanstate(dfs, &index_pri, 0);
			if (ext_ch) {
				rs_ext = dfs_getchanstate(dfs, &index_ext, 1);
			}
			/* Proceed only with a primary state and, when an
			 * extension channel exists, an extension state too. */
			if (rs_pri != NULL
			    && ((ext_ch == NULL) || (rs_ext != NULL))) {
				struct ath_dfs_phyerr_param pe;

				OS_MEMSET(&pe, '\0', sizeof(pe));

				/* Channel actually changed: start the delay
				 * lines from scratch. */
				if (index_pri != dfs->dfs_curchan_radindex) {
					dfs_reset_alldelaylines(dfs,
								DFS_80P80_SEG0);
					/*
					 * Reset only when ext segment is
					 * present
					 */
					if (chan->ic_80p80_both_dfs)
						dfs_reset_alldelaylines(dfs,
									DFS_80P80_SEG1);
				}
				dfs->dfs_curchan_radindex = (int16_t) index_pri;
				dfs->dfs_pri_multiplier_ini =
					radar_info->dfs_pri_multiplier;

				if (rs_ext)
					dfs->dfs_extchan_radindex =
						(int16_t) index_ext;

				ath_dfs_phyerr_param_copy(&pe,
							  &rs_pri->rs_param);
				DFS_DPRINTK(dfs, ATH_DEBUG_DFS3,
					    "%s: firpwr=%d, rssi=%d, height=%d, "
					    "prssi=%d, inband=%d, relpwr=%d, "
					    "relstep=%d, maxlen=%d\n",
					    __func__,
					    pe.pe_firpwr,
					    pe.pe_rrssi,
					    pe.pe_height,
					    pe.pe_prssi,
					    pe.pe_inband,
					    pe.pe_relpwr,
					    pe.pe_relstep, pe.pe_maxlen);

				ic->ic_dfs_enable(ic, &is_fastclk, &pe);
				DFS_DPRINTK(dfs, ATH_DEBUG_DFS,
					    "Enabled radar detection on channel %d\n",
					    chan->ic_freq);
				/* Pulse-duration scaling depends on the
				 * baseband clock rate reported back. */
				dfs->dur_multiplier =
					is_fastclk ? DFS_FAST_CLOCK_MULTIPLIER :
					DFS_NO_FAST_CLOCK_MULTIPLIER;
				DFS_DPRINTK(dfs, ATH_DEBUG_DFS3,
					    "%s: duration multiplier is %d\n",
					    __func__, dfs->dur_multiplier);
			} else
				DFS_DPRINTK(dfs, ATH_DEBUG_DFS,
					    "%s: No more radar states left\n",
					    __func__);
		}
	}

	return DFS_STATUS_SUCCESS;
}
701
702int
703dfs_control(struct ieee80211com *ic, u_int id,
704 void *indata, uint32_t insize, void *outdata, uint32_t *outsize)
705{
706 int error = 0;
707 struct ath_dfs_phyerr_param peout;
708 struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
709 struct dfs_ioctl_params *dfsparams;
710 uint32_t val = 0;
711#ifndef ATH_DFS_RADAR_DETECTION_ONLY
712 struct dfsreq_nolinfo *nol;
713 uint32_t *data = NULL;
714#endif /* ATH_DFS_RADAR_DETECTION_ONLY */
715 int i;
716
717 if (dfs == NULL) {
718 error = -EINVAL;
719 DFS_DPRINTK(dfs, ATH_DEBUG_DFS1, "%s DFS is null\n", __func__);
720 goto bad;
721 }
722
723 switch (id) {
724 case DFS_SET_THRESH:
725 if (insize < sizeof(struct dfs_ioctl_params) || !indata) {
726 DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
727 "%s: insize=%d, expected=%zu bytes, indata=%p\n",
728 __func__, insize,
729 sizeof(struct dfs_ioctl_params), indata);
730 error = -EINVAL;
731 break;
732 }
733 dfsparams = (struct dfs_ioctl_params *)indata;
734 if (!dfs_set_thresholds
735 (ic, DFS_PARAM_FIRPWR, dfsparams->dfs_firpwr))
736 error = -EINVAL;
737 if (!dfs_set_thresholds
738 (ic, DFS_PARAM_RRSSI, dfsparams->dfs_rrssi))
739 error = -EINVAL;
740 if (!dfs_set_thresholds
741 (ic, DFS_PARAM_HEIGHT, dfsparams->dfs_height))
742 error = -EINVAL;
743 if (!dfs_set_thresholds
744 (ic, DFS_PARAM_PRSSI, dfsparams->dfs_prssi))
745 error = -EINVAL;
746 if (!dfs_set_thresholds
747 (ic, DFS_PARAM_INBAND, dfsparams->dfs_inband))
748 error = -EINVAL;
749 /* 5413 speicfic */
750 if (!dfs_set_thresholds
751 (ic, DFS_PARAM_RELPWR, dfsparams->dfs_relpwr))
752 error = -EINVAL;
753 if (!dfs_set_thresholds
754 (ic, DFS_PARAM_RELSTEP, dfsparams->dfs_relstep))
755 error = -EINVAL;
756 if (!dfs_set_thresholds
757 (ic, DFS_PARAM_MAXLEN, dfsparams->dfs_maxlen))
758 error = -EINVAL;
759 break;
760 case DFS_GET_THRESH:
761 if (!outdata || !outsize
762 || *outsize < sizeof(struct dfs_ioctl_params)) {
763 error = -EINVAL;
764 break;
765 }
766 *outsize = sizeof(struct dfs_ioctl_params);
767 dfsparams = (struct dfs_ioctl_params *)outdata;
768
769 /*
770 * Fetch the DFS thresholds using the internal representation.
771 */
772 (void)dfs_get_thresholds(ic, &peout);
773
774 /*
775 * Convert them to the dfs IOCTL representation.
776 */
777 ath_dfs_dfsparam_to_ioctlparam(&peout, dfsparams);
778 break;
779 case DFS_RADARDETECTS:
780 if (!outdata || !outsize || *outsize < sizeof(uint32_t)) {
781 error = -EINVAL;
782 break;
783 }
784 *outsize = sizeof(uint32_t);
785 *((uint32_t *) outdata) = dfs->ath_dfs_stats.num_radar_detects;
786 break;
787 case DFS_DISABLE_DETECT:
788 dfs->dfs_proc_phyerr &= ~DFS_RADAR_EN;
789 dfs->ic->ic_dfs_state.ignore_dfs = 1;
790 DFS_PRINTK("%s enable detects, ignore_dfs %d\n",
791 __func__, dfs->ic->ic_dfs_state.ignore_dfs);
792 break;
793 case DFS_ENABLE_DETECT:
794 dfs->dfs_proc_phyerr |= DFS_RADAR_EN;
795 dfs->ic->ic_dfs_state.ignore_dfs = 0;
796 DFS_PRINTK("%s enable detects, ignore_dfs %d\n",
797 __func__, dfs->ic->ic_dfs_state.ignore_dfs);
798 break;
799 case DFS_DISABLE_FFT:
800 /* UMACDFS: TODO: val = ath_hal_dfs_config_fft(sc->sc_ah, false); */
801 DFS_PRINTK("%s TODO disable FFT val=0x%x \n", __func__, val);
802 break;
803 case DFS_ENABLE_FFT:
804 /* UMACDFS TODO: val = ath_hal_dfs_config_fft(sc->sc_ah, true); */
805 DFS_PRINTK("%s TODO enable FFT val=0x%x \n", __func__, val);
806 break;
807 case DFS_SET_DEBUG_LEVEL:
808 if (insize < sizeof(uint32_t) || !indata) {
809 error = -EINVAL;
810 break;
811 }
812 dfs->dfs_debug_mask = *(uint32_t *) indata;
813 DFS_PRINTK("%s debug level now = 0x%x \n",
814 __func__, dfs->dfs_debug_mask);
815 if (dfs->dfs_debug_mask & ATH_DEBUG_DFS3) {
816 /* Enable debug Radar Event */
817 dfs->dfs_event_log_on = 1;
818 } else {
819 dfs->dfs_event_log_on = 0;
820 }
821 break;
822 case DFS_SET_FALSE_RSSI_THRES:
823 if (insize < sizeof(uint32_t) || !indata) {
824 error = -EINVAL;
825 break;
826 }
827 dfs->ath_dfs_false_rssi_thres = *(uint32_t *) indata;
828 DFS_PRINTK("%s false RSSI threshold now = 0x%x \n",
829 __func__, dfs->ath_dfs_false_rssi_thres);
830 break;
831 case DFS_SET_PEAK_MAG:
832 if (insize < sizeof(uint32_t) || !indata) {
833 error = -EINVAL;
834 break;
835 }
836 dfs->ath_dfs_peak_mag = *(uint32_t *) indata;
837 DFS_PRINTK("%s peak_mag now = 0x%x \n",
838 __func__, dfs->ath_dfs_peak_mag);
839 break;
840 case DFS_IGNORE_CAC:
841 if (insize < sizeof(uint32_t) || !indata) {
842 error = -EINVAL;
843 break;
844 }
845 if (*(uint32_t *) indata) {
846 dfs->ic->ic_dfs_state.ignore_cac = 1;
847 } else {
848 dfs->ic->ic_dfs_state.ignore_cac = 0;
849 }
850 DFS_PRINTK("%s ignore cac = 0x%x \n",
851 __func__, dfs->ic->ic_dfs_state.ignore_cac);
852 break;
853 case DFS_SET_NOL_TIMEOUT:
854 if (insize < sizeof(uint32_t) || !indata) {
855 error = -EINVAL;
856 break;
857 }
858 if (*(int *)indata) {
859 dfs->ath_dfs_nol_timeout = *(int *)indata;
860 } else {
861 dfs->ath_dfs_nol_timeout = DFS_NOL_TIMEOUT_S;
862 }
863 DFS_PRINTK("%s nol timeout = %d sec \n",
864 __func__, dfs->ath_dfs_nol_timeout);
865 break;
866#ifndef ATH_DFS_RADAR_DETECTION_ONLY
867 case DFS_MUTE_TIME:
868 if (insize < sizeof(uint32_t) || !indata) {
869 error = -EINVAL;
870 break;
871 }
872 data = (uint32_t *) indata;
873 dfs->ath_dfstesttime = *data;
874 dfs->ath_dfstesttime *= (1000); /* convert sec into ms */
875 break;
876 case DFS_GET_USENOL:
877 if (!outdata || !outsize || *outsize < sizeof(uint32_t)) {
878 error = -EINVAL;
879 break;
880 }
881 *outsize = sizeof(uint32_t);
882 *((uint32_t *) outdata) = dfs->dfs_rinfo.rn_use_nol;
883
884 for (i = 0;
885 (i < DFS_EVENT_LOG_SIZE) && (i < dfs->dfs_event_log_count);
886 i++) {
887 /* DFS_DPRINTK(sc, ATH_DEBUG_DFS,"ts=%llu diff_ts=%u rssi=%u dur=%u\n", dfs->radar_log[i].ts, dfs->radar_log[i].diff_ts, dfs->radar_log[i].rssi, dfs->radar_log[i].dur); */
888
889 }
890 dfs->dfs_event_log_count = 0;
891 dfs->dfs_phyerr_count = 0;
892 dfs->dfs_phyerr_reject_count = 0;
893 dfs->dfs_phyerr_queued_count = 0;
894 dfs->dfs_phyerr_freq_min = 0x7fffffff;
895 dfs->dfs_phyerr_freq_max = 0;
896 break;
897 case DFS_SET_USENOL:
898 if (insize < sizeof(uint32_t) || !indata) {
899 error = -EINVAL;
900 break;
901 }
902 dfs->dfs_rinfo.rn_use_nol = *(uint32_t *) indata;
903 /* iwpriv markdfs in linux can do the same thing... */
904 break;
905 case DFS_GET_NOL:
906 if (!outdata || !outsize
907 || *outsize < sizeof(struct dfsreq_nolinfo)) {
908 error = -EINVAL;
909 break;
910 }
911 *outsize = sizeof(struct dfsreq_nolinfo);
912 nol = (struct dfsreq_nolinfo *)outdata;
913 dfs_get_nol(dfs, (struct dfsreq_nolelem *)nol->dfs_nol,
914 &nol->ic_nchans);
915 dfs_print_nol(dfs);
916 break;
917 case DFS_SET_NOL:
918 if (insize < sizeof(struct dfsreq_nolinfo) || !indata) {
919 error = -EINVAL;
920 break;
921 }
922 nol = (struct dfsreq_nolinfo *)indata;
923 dfs_set_nol(dfs, (struct dfsreq_nolelem *)nol->dfs_nol,
924 nol->ic_nchans);
925 break;
926
927 case DFS_SHOW_NOL:
928 dfs_print_nol(dfs);
929 break;
930 case DFS_BANGRADAR:
931#if 0 /* MERGE_TBD */
932 if (sc->sc_nostabeacons) {
933 printk("No radar detection Enabled \n");
934 break;
935 }
936#endif
937 dfs->dfs_bangradar = 1;
938 dfs->ath_radar_tasksched = 1;
939 OS_SET_TIMER(&dfs->ath_dfs_task_timer, 0);
940 break;
941#endif /* ATH_DFS_RADAR_DETECTION_ONLY */
942 default:
943 error = -EINVAL;
944 }
945bad:
946 return error;
947}
948
/*
 * dfs_set_thresholds() - update a single PHY-error detection threshold.
 * @ic:         ieee80211com instance.
 * @threshtype: DFS_PARAM_* selector for the field to change.
 * @value:      new threshold value.
 *
 * Writes the value into the current channel's cached radar state and
 * pushes only that one field down to the driver/HAL via ic_dfs_enable()
 * (all other fields are sent as "no value").
 *
 * Return: 1 on success, 0 when the DFS context is missing or the current
 * channel radar index is out of range.
 */
int
dfs_set_thresholds(struct ieee80211com *ic, const uint32_t threshtype,
		   const uint32_t value)
{
	struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
	int16_t chanindex;
	struct dfs_state *rs;
	struct ath_dfs_phyerr_param pe;
	int is_fastclk = 0;     /* XXX throw-away */

	if (dfs == NULL) {
		DFS_DPRINTK(dfs, ATH_DEBUG_DFS1, "%s: ic_dfs is NULL\n",
			    __func__);
		return 0;
	}

	chanindex = dfs->dfs_curchan_radindex;
	if ((chanindex < 0) || (chanindex >= DFS_NUM_RADAR_STATES)) {
		DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
			    "%s: chanindex = %d, DFS_NUM_RADAR_STATES=%d\n",
			    __func__, chanindex, DFS_NUM_RADAR_STATES);
		return 0;
	}

	DFS_DPRINTK(dfs, ATH_DEBUG_DFS,
		    "%s: threshtype=%d, value=%d\n", __func__, threshtype,
		    value);

	/* Start from "no value" so only the selected field is programmed. */
	ath_dfs_phyerr_init_noval(&pe);

	rs = &(dfs->dfs_radar[chanindex]);
	switch (threshtype) {
	case DFS_PARAM_FIRPWR:
		rs->rs_param.pe_firpwr = (int32_t) value;
		pe.pe_firpwr = value;
		break;
	case DFS_PARAM_RRSSI:
		rs->rs_param.pe_rrssi = value;
		pe.pe_rrssi = value;
		break;
	case DFS_PARAM_HEIGHT:
		rs->rs_param.pe_height = value;
		pe.pe_height = value;
		break;
	case DFS_PARAM_PRSSI:
		rs->rs_param.pe_prssi = value;
		pe.pe_prssi = value;
		break;
	case DFS_PARAM_INBAND:
		rs->rs_param.pe_inband = value;
		pe.pe_inband = value;
		break;
		/* 5413 specific */
	case DFS_PARAM_RELPWR:
		rs->rs_param.pe_relpwr = value;
		pe.pe_relpwr = value;
		break;
	case DFS_PARAM_RELSTEP:
		rs->rs_param.pe_relstep = value;
		pe.pe_relstep = value;
		break;
	case DFS_PARAM_MAXLEN:
		rs->rs_param.pe_maxlen = value;
		pe.pe_maxlen = value;
		break;
	default:
		DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
			    "%s: unknown threshtype (%d)\n",
			    __func__, threshtype);
		break;
	}

	/*
	 * The driver layer dfs_enable routine is tasked with translating
	 * values from the global format to the per-device (HAL, offload)
	 * format.
	 */
	ic->ic_dfs_enable(ic, &is_fastclk, &pe);
	return 1;
}
1029
1030int
1031dfs_get_thresholds(struct ieee80211com *ic, struct ath_dfs_phyerr_param *param)
1032{
1033 /* UMACDFS : TODO:ath_hal_getdfsthresh(sc->sc_ah, param); */
1034
1035 OS_MEMZERO(param, sizeof(*param));
1036
1037 (void)ic->ic_dfs_get_thresholds(ic, param);
1038
1039 return 1;
1040}
1041
1042uint16_t dfs_usenol(struct ieee80211com *ic)
1043{
1044 struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
1045 return dfs ? (uint16_t) dfs->dfs_rinfo.rn_use_nol : 0;
1046}
1047
1048uint16_t dfs_isdfsregdomain(struct ieee80211com *ic)
1049{
1050 struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
1051 return dfs ? dfs->dfsdomain : 0;
1052}
1053
#endif /* ATH_SUPPORT_DFS */