blob: 4d7fbbc4ff652ef79180002df46108e35a86831f [file] [log] [blame]
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001/*
Anurag Chouhan6d760662016-02-20 16:05:43 +05302 * Copyright (c) 2002-2016 The Linux Foundation. All rights reserved.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28/*===========================================================================
29
Anurag Chouhan6d760662016-02-20 16:05:43 +053030 dfs.c
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080031
32 OVERVIEW:
33
34 Source code borrowed from QCA_MAIN DFS module
35
36 DEPENDENCIES:
37
38 Are listed for each API below.
39
40 ===========================================================================*/
41
42/*===========================================================================
43
Anurag Chouhan6d760662016-02-20 16:05:43 +053044 EDIT HISTORY FOR FILE
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080045
46 This section contains comments describing changes made to the module.
47 Notice that changes are listed in reverse chronological order.
48
49 when who what, where, why
50 ---------- --- --------------------------------------------------------
51
52 ===========================================================================*/
53
54#include <osdep.h>
55
56#ifndef ATH_SUPPORT_DFS
57#define ATH_SUPPORT_DFS 1
58
59/* #include "if_athioctl.h" */
60/* #include "if_athvar.h" */
61#include "dfs_ioctl.h"
62#include "dfs.h"
63
/*
 * Regulatory DFS domain override. DFS_UNINIT_DOMAIN leaves the domain as
 * detected/configured elsewhere (not referenced further in this file).
 */
int domainoverride = DFS_UNINIT_DOMAIN;

/*
 * channel switch announcement (CSA) behaviour on radar detect:
 * usenol=1 (default) make CSA and switch to a new channel on radar detect
 * usenol=0, make CSA with next channel same as current on radar detect
 * usenol=2, no CSA and stay on the same channel on radar detect
 * Copied into dfs->dfs_rinfo.rn_use_nol at attach time.
 */
int usenol = 1;

/* Module-wide DFS debug level, initialized to ATH_DEBUG_DFS. */
uint32_t dfs_debug_level = ATH_DEBUG_DFS;
75
#if 0 /* the code to call this is currently commented-out below */
/*
 * Mark a channel as having interference detected upon it.
 *
 * This adds the interference marker to both the primary and
 * extension channel.
 *
 * Only acts when NOL handling is enabled (rn_use_nol == 1) and the
 * interface operates as AP or IBSS; each affected channel is flagged
 * with CHANNEL_INTERFERENCE and added to the NOL with the configured
 * timeout, after which the umac/driver channel lists are refreshed.
 *
 * XXX TODO: make the NOL and channel interference logic a bit smarter
 * so only the channel with the radar event is marked, rather than
 * both the primary and extension.
 */
static void
dfs_channel_mark_radar(struct ath_dfs *dfs, struct dfs_ieee80211_channel *chan)
{
	struct ieee80211_channel_list chan_info;
	int i;

	/* chan->ic_flagext |= CHANNEL_INTERFERENCE; */

	/*
	 * If radar is detected in 40MHz mode, add both the primary and the
	 * extension channels to the NOL. chan is the channel data we return
	 * to the ath_dev layer which passes it on to the 80211 layer.
	 * As we want the AP to change channels and send out a CSA,
	 * we always pass back the primary channel data to the ath_dev layer.
	 */
	if ((dfs->dfs_rinfo.rn_use_nol == 1) &&
	    (dfs->ic->ic_opmode == IEEE80211_M_HOSTAP ||
	     dfs->ic->ic_opmode == IEEE80211_M_IBSS)) {
		chan_info.cl_nchans = 0;
		dfs->ic->ic_get_ext_chan_info(dfs->ic, &chan_info);

		for (i = 0; i < chan_info.cl_nchans; i++) {
			if (chan_info.cl_channels[i] == NULL) {
				DFS_PRINTK("%s: NULL channel\n", __func__);
			} else {
				chan_info.cl_channels[i]->ic_flagext |=
					CHANNEL_INTERFERENCE;
				dfs_nol_addchan(dfs, chan_info.cl_channels[i],
						dfs->ath_dfs_nol_timeout);
			}
		}

		/*
		 * Update the umac/driver channels with the new NOL information.
		 */
		dfs_nol_update(dfs);
	}
}
#endif /* #if 0 */
126
/*
 * dfs_task() - deferred radar-event processing (handler for
 * dfs->ath_dfs_task_timer; armed elsewhere, e.g. by DFS_BANGRADAR in
 * dfs_control()).
 *
 * Runs dfs_process_radarevent() against the current channel. On a
 * confirmed radar detection:
 *   - rn_use_nol == 1: notify the umac so CSA/NOL machinery kicks in;
 *   - rn_use_nol == 0: test mode — remember the current IEEE channel and
 *     arm the test timer so a CSA back to it is issued shortly after;
 *   - rn_use_nol == 2: detect-only, no action taken here.
 * Always clears ath_radar_tasksched on exit so the task can be
 * rescheduled.
 */
static os_timer_func(dfs_task)
{
	struct ieee80211com *ic;
	struct ath_dfs *dfs = NULL;

	OS_GET_TIMER_ARG(ic, struct ieee80211com *);
	dfs = (struct ath_dfs *)ic->ic_dfs;
	/*
	 * XXX no locking?!
	 */
	if (dfs_process_radarevent(dfs, ic->ic_curchan)) {
#ifndef ATH_DFS_RADAR_DETECTION_ONLY

		/*
		 * This marks the channel (and the extension channel, if HT40) as
		 * having seen a radar event. It marks CHAN_INTERFERENCE and
		 * will add it to the local NOL implementation.
		 *
		 * This is only done for 'usenol=1', as the other two modes
		 * don't do radar notification or CAC/CSA/NOL; it just notes
		 * there was a radar.
		 */

		if (dfs->dfs_rinfo.rn_use_nol == 1) {
			/* dfs_channel_mark_radar(dfs, ic->ic_curchan); */
		}
#endif /* ATH_DFS_RADAR_DETECTION_ONLY */

		/*
		 * This calls into the umac DFS code, which sets the umac related
		 * radar flags and begins the channel change machinery.
		 *
		 * XXX TODO: the umac NOL code isn't used, but IEEE80211_CHAN_RADAR
		 * still gets set. Since the umac NOL code isn't used, that flag
		 * is never cleared. This needs to be fixed. See EV 105776.
		 */
		if (dfs->dfs_rinfo.rn_use_nol == 1) {
			ic->ic_dfs_notify_radar(ic, ic->ic_curchan);
		} else if (dfs->dfs_rinfo.rn_use_nol == 0) {
			/*
			 * For the test mode, don't do a CSA here; but setup the
			 * test timer so we get a CSA _back_ to the original channel.
			 */
			OS_CANCEL_TIMER(&dfs->ath_dfstesttimer);
			dfs->ath_dfstest = 1;
			/* chan_lock guards ic_curchan against concurrent change */
			qdf_spin_lock_bh(&ic->chan_lock);
			dfs->ath_dfstest_ieeechan = ic->ic_curchan->ic_ieee;
			qdf_spin_unlock_bh(&ic->chan_lock);
			dfs->ath_dfstesttime = 1;       /* 1ms */
			OS_SET_TIMER(&dfs->ath_dfstesttimer,
				     dfs->ath_dfstesttime);
		}
	}
	dfs->ath_radar_tasksched = 0;
}
182
183static os_timer_func(dfs_testtimer_task)
184{
185 struct ieee80211com *ic;
186 struct ath_dfs *dfs = NULL;
187
188 OS_GET_TIMER_ARG(ic, struct ieee80211com *);
189 dfs = (struct ath_dfs *)ic->ic_dfs;
190
191 /* XXX no locking? */
192 dfs->ath_dfstest = 0;
193
194 /*
195 * Flip the channel back to the original channel.
196 * Make sure this is done properly with a CSA.
197 */
198 DFS_PRINTK("%s: go back to channel %d\n",
199 __func__, dfs->ath_dfstest_ieeechan);
200
201 /*
202 * XXX The mere existence of this method indirection
203 * to a umac function means this code belongs in
204 * the driver, _not_ here. Please fix this!
205 */
206 ic->ic_start_csa(ic, dfs->ath_dfstest_ieeechan);
207}
208
209static int dfs_get_debug_info(struct ieee80211com *ic, int type, void *data)
210{
211 struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
212 if (data) {
213 *(uint32_t *) data = dfs->dfs_proc_phyerr;
214 }
215 return (int)dfs->dfs_proc_phyerr;
216}
217
218int dfs_attach(struct ieee80211com *ic)
219{
220 int i, n;
221 struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
222 struct ath_dfs_radar_tab_info radar_info;
223
224 if (dfs != NULL) {
225 /*DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
226 "%s: ic_dfs was not NULL\n",
227 __func__);
228 */
229 return 1;
230 }
231
Anurag Chouhan2ed1fce2016-02-22 15:07:01 +0530232 dfs = (struct ath_dfs *)os_malloc(NULL, sizeof(struct ath_dfs),
233 GFP_ATOMIC);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800234
235 if (dfs == NULL) {
236 /*DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
237 "%s: ath_dfs allocation failed\n", __func__); */
238 return 1;
239 }
240
241 OS_MEMZERO(dfs, sizeof(struct ath_dfs));
242
243 ic->ic_dfs = (void *)dfs;
244
245 dfs->ic = ic;
246
247 ic->ic_dfs_debug = dfs_get_debug_info;
248#ifndef ATH_DFS_RADAR_DETECTION_ONLY
249 dfs->dfs_nol = NULL;
250#endif
251
252 /*
253 * Zero out radar_info. It's possible that the attach function won't
254 * fetch an initial regulatory configuration; you really do want to
255 * ensure that the contents indicates there aren't any filters.
256 */
257 OS_MEMZERO(&radar_info, sizeof(radar_info));
258 ic->ic_dfs_attach(ic, &dfs->dfs_caps, &radar_info);
259 dfs_clear_stats(ic);
260 dfs->dfs_event_log_on = 0;
261 OS_INIT_TIMER(NULL, &(dfs->ath_dfs_task_timer), dfs_task, (void *)(ic),
Anurag Chouhan6d760662016-02-20 16:05:43 +0530262 QDF_TIMER_TYPE_SW);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800263#ifndef ATH_DFS_RADAR_DETECTION_ONLY
264 OS_INIT_TIMER(NULL, &(dfs->ath_dfstesttimer), dfs_testtimer_task,
Anurag Chouhan6d760662016-02-20 16:05:43 +0530265 (void *)ic, QDF_TIMER_TYPE_SW);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800266 dfs->ath_dfs_cac_time = ATH_DFS_WAIT_MS;
267 dfs->ath_dfstesttime = ATH_DFS_TEST_RETURN_PERIOD_MS;
268#endif
269 ATH_DFSQ_LOCK_INIT(dfs);
270 STAILQ_INIT(&dfs->dfs_radarq);
271 ATH_ARQ_LOCK_INIT(dfs);
272 STAILQ_INIT(&dfs->dfs_arq);
273 STAILQ_INIT(&(dfs->dfs_eventq));
274 ATH_DFSEVENTQ_LOCK_INIT(dfs);
275 dfs->events = (struct dfs_event *)os_malloc(NULL,
276 sizeof(struct dfs_event) *
277 DFS_MAX_EVENTS, GFP_ATOMIC);
278 if (dfs->events == NULL) {
279 OS_FREE(dfs);
280 ic->ic_dfs = NULL;
281 DFS_PRINTK("%s: events allocation failed\n", __func__);
282 return 1;
283 }
284 for (i = 0; i < DFS_MAX_EVENTS; i++) {
285 STAILQ_INSERT_TAIL(&(dfs->dfs_eventq), &dfs->events[i],
286 re_list);
287 }
288
289 dfs->pulses =
290 (struct dfs_pulseline *)os_malloc(NULL,
291 sizeof(struct dfs_pulseline),
292 GFP_ATOMIC);
293 if (dfs->pulses == NULL) {
294 OS_FREE(dfs->events);
295 dfs->events = NULL;
296 OS_FREE(dfs);
297 ic->ic_dfs = NULL;
298 DFS_PRINTK("%s: pulse buffer allocation failed\n", __func__);
299 return 1;
300 }
301
Rakesh Sunkif7f82e52015-12-14 15:09:40 -0800302 /*
303 * If the chip supports DFS-3 then allocate
304 * memory for pulses for extension segment.
305 */
306 if (ic->dfs_hw_bd_id != DFS_HWBD_QCA6174) {
307 dfs->pulses_ext_seg = (struct dfs_pulseline *)
308 os_malloc(NULL,
309 sizeof(struct dfs_pulseline),
310 GFP_ATOMIC);
311 if (dfs->pulses_ext_seg == NULL) {
312 OS_FREE(dfs->events);
313 dfs->events = NULL;
314 OS_FREE(dfs);
315 ic->ic_dfs = NULL;
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +0530316 QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_ERROR,
Rakesh Sunkif7f82e52015-12-14 15:09:40 -0800317 "%s[%d]: pulse buffer allocation failed",
318 __func__, __LINE__);
319 return 1;
320 }
321 dfs->pulses_ext_seg->pl_lastelem = DFS_MAX_PULSE_BUFFER_MASK;
322 }
323
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800324 dfs->pulses->pl_lastelem = DFS_MAX_PULSE_BUFFER_MASK;
325
326 /* Allocate memory for radar filters */
327 for (n = 0; n < DFS_MAX_RADAR_TYPES; n++) {
328 dfs->dfs_radarf[n] =
329 (struct dfs_filtertype *)os_malloc(NULL,
330 sizeof(struct
331 dfs_filtertype),
332 GFP_ATOMIC);
333 if (dfs->dfs_radarf[n] == NULL) {
334 DFS_PRINTK
335 ("%s: cannot allocate memory for radar filter types\n",
336 __func__);
337 goto bad1;
338 }
339 OS_MEMZERO(dfs->dfs_radarf[n], sizeof(struct dfs_filtertype));
340 }
341 /* Allocate memory for radar table */
342 dfs->dfs_radartable =
Anurag Chouhan6d760662016-02-20 16:05:43 +0530343 (int8_t **) os_malloc(NULL, 256 * sizeof(int8_t *), GFP_ATOMIC);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800344 if (dfs->dfs_radartable == NULL) {
345 DFS_PRINTK("%s: cannot allocate memory for radar table\n",
346 __func__);
347 goto bad1;
348 }
349 for (n = 0; n < 256; n++) {
350 dfs->dfs_radartable[n] =
351 os_malloc(NULL, DFS_MAX_RADAR_OVERLAP * sizeof(int8_t),
352 GFP_ATOMIC);
353 if (dfs->dfs_radartable[n] == NULL) {
354 DFS_PRINTK
355 ("%s: cannot allocate memory for radar table entry\n",
356 __func__);
357 goto bad2;
358 }
359 }
360
361 if (usenol == 0)
362 DFS_PRINTK("%s: NOL disabled\n", __func__);
363 else if (usenol == 2)
364 DFS_PRINTK("%s: NOL disabled; no CSA\n", __func__);
365
366 dfs->dfs_rinfo.rn_use_nol = usenol;
367
368 /* Init the cached extension channel busy for false alarm reduction */
369 dfs->dfs_rinfo.ext_chan_busy_ts = ic->ic_get_TSF64(ic);
370 dfs->dfs_rinfo.dfs_ext_chan_busy = 0;
371 /* Init the Bin5 chirping related data */
372 dfs->dfs_rinfo.dfs_bin5_chirp_ts = dfs->dfs_rinfo.ext_chan_busy_ts;
373 dfs->dfs_rinfo.dfs_last_bin5_dur = MAX_BIN5_DUR;
374 dfs->dfs_b5radars = NULL;
Rakesh Sunkif7f82e52015-12-14 15:09:40 -0800375 if (ic->dfs_hw_bd_id != DFS_HWBD_QCA6174) {
376 dfs->dfs_rinfo.dfs_last_bin5_dur_ext_seg = MAX_BIN5_DUR;
377 dfs->dfs_b5radars_ext_seg = NULL;
378 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800379
380 /*
381 * If dfs_init_radar_filters() fails, we can abort here and
382 * reconfigure when the first valid channel + radar config
383 * is available.
384 */
385 if (dfs_init_radar_filters(ic, &radar_info)) {
386 DFS_PRINTK(" %s: Radar Filter Intialization Failed \n",
387 __func__);
388 return 1;
389 }
390
391 dfs->ath_dfs_false_rssi_thres = RSSI_POSSIBLY_FALSE;
392 dfs->ath_dfs_peak_mag = SEARCH_FFT_REPORT_PEAK_MAG_THRSH;
393 dfs->dfs_phyerr_freq_min = 0x7fffffff;
394 dfs->dfs_phyerr_freq_max = 0;
395 dfs->dfs_phyerr_queued_count = 0;
396 dfs->dfs_phyerr_w53_counter = 0;
397 dfs->dfs_pri_multiplier = 2;
398
399 dfs->ath_dfs_nol_timeout = DFS_NOL_TIMEOUT_S;
400 return 0;
401
402bad2:
403 OS_FREE(dfs->dfs_radartable);
404 dfs->dfs_radartable = NULL;
405bad1:
406 for (n = 0; n < DFS_MAX_RADAR_TYPES; n++) {
407 if (dfs->dfs_radarf[n] != NULL) {
408 OS_FREE(dfs->dfs_radarf[n]);
409 dfs->dfs_radarf[n] = NULL;
410 }
411 }
412 if (dfs->pulses) {
413 OS_FREE(dfs->pulses);
414 dfs->pulses = NULL;
415 }
Rakesh Sunkif7f82e52015-12-14 15:09:40 -0800416 if (dfs->pulses_ext_seg &&
417 ic->dfs_hw_bd_id != DFS_HWBD_QCA6174) {
418 OS_FREE(dfs->pulses_ext_seg);
419 dfs->pulses_ext_seg = NULL;
420 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800421 if (dfs->events) {
422 OS_FREE(dfs->events);
423 dfs->events = NULL;
424 }
425
426 if (ic->ic_dfs) {
427 OS_FREE(ic->ic_dfs);
428 ic->ic_dfs = NULL;
429 }
430 return 1;
431#undef N
432}
433
/*
 * dfs_detach() - tear down the per-device DFS state created by dfs_attach().
 *
 * Cancels any pending DFS timers, drains the radar queue and delay lines,
 * frees every allocation owned by the ath_dfs structure (pulse lines,
 * radar filter types, radar table, bin5 radar state, event pool), cleans
 * up the NOL timer, frees the structure itself and clears ic->ic_dfs.
 * Safe to call when ic->ic_dfs is already NULL.
 */
void dfs_detach(struct ieee80211com *ic)
{
	struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
	int n, empty;

	if (dfs == NULL) {
		DFS_DPRINTK(dfs, ATH_DEBUG_DFS1, "%s: ic_dfs is NULL\n",
			    __func__);
		return;
	}

	/* Bug 29099 make sure all outstanding timers are cancelled */

	if (dfs->ath_radar_tasksched) {
		OS_CANCEL_TIMER(&dfs->ath_dfs_task_timer);
		dfs->ath_radar_tasksched = 0;
	}

	if (dfs->ath_dfstest) {
		OS_CANCEL_TIMER(&dfs->ath_dfstesttimer);
		dfs->ath_dfstest = 0;
	}
#if 0
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
	if (dfs->ic_dfswait) {
		OS_CANCEL_TIMER(&dfs->ic_dfswaittimer);
		dfs->ath_dfswait = 0;
	}

	OS_CANCEL_TIMER(&dfs->sc_dfs_war_timer);
	if (dfs->dfs_nol != NULL) {
		struct dfs_nolelem *nol, *next;
		nol = dfs->dfs_nol;
		/* Bug 29099 - each NOL element has its own timer, cancel it and
		   free the element */
		while (nol != NULL) {
			OS_CANCEL_TIMER(&nol->nol_timer);
			next = nol->nol_next;
			OS_FREE(nol);
			nol = next;
		}
		dfs->dfs_nol = NULL;
	}
#endif
#endif

	/* Return radar events to free q */
	dfs_reset_radarq(dfs);
	dfs_reset_alldelaylines(dfs, DFS_80P80_SEG0);
	/* Extension-segment delay lines exist only on non-QCA6174 boards */
	if (ic->dfs_hw_bd_id != DFS_HWBD_QCA6174)
		dfs_reset_alldelaylines(dfs, DFS_80P80_SEG1);

	/* Free up pulse log */
	if (dfs->pulses != NULL) {
		OS_FREE(dfs->pulses);
		dfs->pulses = NULL;
	}

	if (dfs->pulses_ext_seg != NULL) {
		OS_FREE(dfs->pulses_ext_seg);
		dfs->pulses_ext_seg = NULL;
	}

	for (n = 0; n < DFS_MAX_RADAR_TYPES; n++) {
		if (dfs->dfs_radarf[n] != NULL) {
			OS_FREE(dfs->dfs_radarf[n]);
			dfs->dfs_radarf[n] = NULL;
		}
	}

	if (dfs->dfs_radartable != NULL) {
		for (n = 0; n < 256; n++) {
			if (dfs->dfs_radartable[n] != NULL) {
				OS_FREE(dfs->dfs_radartable[n]);
				dfs->dfs_radartable[n] = NULL;
			}
		}
		OS_FREE(dfs->dfs_radartable);
		dfs->dfs_radartable = NULL;
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
		dfs->ath_dfs_isdfsregdomain = 0;
#endif
	}

	if (dfs->dfs_b5radars != NULL) {
		OS_FREE(dfs->dfs_b5radars);
		dfs->dfs_b5radars = NULL;
	}
	if (dfs->dfs_b5radars_ext_seg != NULL) {
		OS_FREE(dfs->dfs_b5radars_ext_seg);
		dfs->dfs_b5radars_ext_seg = NULL;
	}

/* Commenting out since all the ar functions are obsolete and
 * the function definition has been removed as part of dfs_ar.c
 * dfs_reset_ar(dfs);
 */
	ATH_ARQ_LOCK(dfs);
	empty = STAILQ_EMPTY(&(dfs->dfs_arq));
	ATH_ARQ_UNLOCK(dfs);
	if (!empty) {
/*
 * Commenting out since all the ar functions are obsolete and
 * the function definition has been removed as part of dfs_ar.c
 *
 * dfs_reset_arq(dfs);
 */
	}
	if (dfs->events != NULL) {
		OS_FREE(dfs->events);
		dfs->events = NULL;
	}
	dfs_nol_timer_cleanup(dfs);
	OS_FREE(dfs);

	/* XXX? */
	ic->ic_dfs = NULL;
}
552
553/*
554 * This is called each time a channel change occurs, to (potentially) enable
555 * the radar code.
556 */
557int dfs_radar_disable(struct ieee80211com *ic)
558{
559 struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
560#ifdef ATH_ENABLE_AR
561 dfs->dfs_proc_phyerr &= ~DFS_AR_EN;
562#endif
563 dfs->dfs_proc_phyerr &= ~DFS_RADAR_EN;
564 return 0;
565}
566
/*
 * dfs_radar_enable() - (re)enable radar detection after a channel change.
 *
 * Called on every channel change. Re-initializes the radar filters from
 * radar_info (the country/regdomain may have changed), then — for AP/IBSS
 * on a DFS channel — enables radar phy-error processing, resets the delay
 * lines, binds the primary (and, for HT40, extension) channel to a radar
 * state and pushes that state's thresholds down to the hardware via
 * ic->ic_dfs_enable().
 *
 * Returns DFS_STATUS_SUCCESS, or -EIO when DFS state is missing or the
 * radar filter initialization fails.
 */
int dfs_radar_enable(struct ieee80211com *ic,
		     struct ath_dfs_radar_tab_info *radar_info)
{
	int is_ext_ch;
	int is_fastclk = 0;
	int radar_filters_init_status = 0;
	/* uint32_t rfilt; */
	struct ath_dfs *dfs;
	struct dfs_state *rs_pri, *rs_ext;
	struct dfs_ieee80211_channel *chan = ic->ic_curchan, *ext_ch = NULL;
	is_ext_ch = IEEE80211_IS_CHAN_11N_HT40(ic->ic_curchan);
	dfs = (struct ath_dfs *)ic->ic_dfs;
	rs_pri = NULL;
	rs_ext = NULL;
#if 0
	int i;
#endif
	if (dfs == NULL) {
		DFS_DPRINTK(dfs, ATH_DEBUG_DFS, "%s: ic_dfs is NULL\n",
			    __func__);

		return -EIO;
	}
	/* Quiesce hardware radar detection while we reconfigure. */
	ic->ic_dfs_disable(ic);

	/*
	 * Setting country code might change the DFS domain
	 * so initialize the DFS Radar filters
	 */
	radar_filters_init_status = dfs_init_radar_filters(ic, radar_info);

	/*
	 * dfs_init_radar_filters() returns 1 on failure and
	 * 0 on success.
	 */
	if (DFS_STATUS_FAIL == radar_filters_init_status) {
		QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_ERROR,
			  "%s[%d]: DFS Radar Filters Initialization Failed",
			  __func__, __LINE__);
		return -EIO;
	}

	if ((ic->ic_opmode == IEEE80211_M_HOSTAP
	     || ic->ic_opmode == IEEE80211_M_IBSS)) {

		if (IEEE80211_IS_CHAN_DFS(chan)) {

			uint8_t index_pri, index_ext;
#ifdef ATH_ENABLE_AR
			dfs->dfs_proc_phyerr |= DFS_AR_EN;
#endif
			dfs->dfs_proc_phyerr |= DFS_RADAR_EN;

			if (is_ext_ch) {
				ext_ch = ieee80211_get_extchan(ic);
			}
			dfs_reset_alldelaylines(dfs, DFS_80P80_SEG0);
			/*
			 * Extension segment delaylines will be
			 * enabled only when SAP operates in 80p80
			 * and both the channels are DFS.
			 */
			if (chan->ic_80p80_both_dfs)
				dfs_reset_alldelaylines(dfs, DFS_80P80_SEG1);

			/* Bind channel(s) to radar-state slots. */
			rs_pri = dfs_getchanstate(dfs, &index_pri, 0);
			if (ext_ch) {
				rs_ext = dfs_getchanstate(dfs, &index_ext, 1);
			}
			if (rs_pri != NULL
			    && ((ext_ch == NULL) || (rs_ext != NULL))) {
				struct ath_dfs_phyerr_param pe;

				OS_MEMSET(&pe, '\0', sizeof(pe));

				/* New radar state => stale delay-line
				 * contents; reset them again. */
				if (index_pri != dfs->dfs_curchan_radindex) {
					dfs_reset_alldelaylines(dfs,
								DFS_80P80_SEG0);
					/*
					 * Reset only when ext segment is
					 * present
					 */
					if (chan->ic_80p80_both_dfs)
						dfs_reset_alldelaylines(dfs,
									DFS_80P80_SEG1);
				}
				dfs->dfs_curchan_radindex = (int16_t) index_pri;
				dfs->dfs_pri_multiplier_ini =
					radar_info->dfs_pri_multiplier;

				if (rs_ext)
					dfs->dfs_extchan_radindex =
						(int16_t) index_ext;

				ath_dfs_phyerr_param_copy(&pe,
							  &rs_pri->rs_param);
				DFS_DPRINTK(dfs, ATH_DEBUG_DFS3,
					    "%s: firpwr=%d, rssi=%d, height=%d, "
					    "prssi=%d, inband=%d, relpwr=%d, "
					    "relstep=%d, maxlen=%d\n",
					    __func__,
					    pe.pe_firpwr,
					    pe.pe_rrssi,
					    pe.pe_height,
					    pe.pe_prssi,
					    pe.pe_inband,
					    pe.pe_relpwr,
					    pe.pe_relstep, pe.pe_maxlen);

				/* Push thresholds to HW; learns clock rate */
				ic->ic_dfs_enable(ic, &is_fastclk, &pe);
				DFS_DPRINTK(dfs, ATH_DEBUG_DFS,
					    "Enabled radar detection on channel %d\n",
					    chan->ic_freq);
				dfs->dur_multiplier =
					is_fastclk ? DFS_FAST_CLOCK_MULTIPLIER :
					DFS_NO_FAST_CLOCK_MULTIPLIER;
				DFS_DPRINTK(dfs, ATH_DEBUG_DFS3,
					    "%s: duration multiplier is %d\n",
					    __func__, dfs->dur_multiplier);
			} else
				DFS_DPRINTK(dfs, ATH_DEBUG_DFS,
					    "%s: No more radar states left\n",
					    __func__);
		}
	}

	return DFS_STATUS_SUCCESS;
}
699
700int
701dfs_control(struct ieee80211com *ic, u_int id,
702 void *indata, uint32_t insize, void *outdata, uint32_t *outsize)
703{
704 int error = 0;
705 struct ath_dfs_phyerr_param peout;
706 struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
707 struct dfs_ioctl_params *dfsparams;
708 uint32_t val = 0;
709#ifndef ATH_DFS_RADAR_DETECTION_ONLY
710 struct dfsreq_nolinfo *nol;
711 uint32_t *data = NULL;
712#endif /* ATH_DFS_RADAR_DETECTION_ONLY */
713 int i;
714
715 if (dfs == NULL) {
716 error = -EINVAL;
717 DFS_DPRINTK(dfs, ATH_DEBUG_DFS1, "%s DFS is null\n", __func__);
718 goto bad;
719 }
720
721 switch (id) {
722 case DFS_SET_THRESH:
723 if (insize < sizeof(struct dfs_ioctl_params) || !indata) {
724 DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
725 "%s: insize=%d, expected=%zu bytes, indata=%p\n",
726 __func__, insize,
727 sizeof(struct dfs_ioctl_params), indata);
728 error = -EINVAL;
729 break;
730 }
731 dfsparams = (struct dfs_ioctl_params *)indata;
732 if (!dfs_set_thresholds
733 (ic, DFS_PARAM_FIRPWR, dfsparams->dfs_firpwr))
734 error = -EINVAL;
735 if (!dfs_set_thresholds
736 (ic, DFS_PARAM_RRSSI, dfsparams->dfs_rrssi))
737 error = -EINVAL;
738 if (!dfs_set_thresholds
739 (ic, DFS_PARAM_HEIGHT, dfsparams->dfs_height))
740 error = -EINVAL;
741 if (!dfs_set_thresholds
742 (ic, DFS_PARAM_PRSSI, dfsparams->dfs_prssi))
743 error = -EINVAL;
744 if (!dfs_set_thresholds
745 (ic, DFS_PARAM_INBAND, dfsparams->dfs_inband))
746 error = -EINVAL;
747 /* 5413 speicfic */
748 if (!dfs_set_thresholds
749 (ic, DFS_PARAM_RELPWR, dfsparams->dfs_relpwr))
750 error = -EINVAL;
751 if (!dfs_set_thresholds
752 (ic, DFS_PARAM_RELSTEP, dfsparams->dfs_relstep))
753 error = -EINVAL;
754 if (!dfs_set_thresholds
755 (ic, DFS_PARAM_MAXLEN, dfsparams->dfs_maxlen))
756 error = -EINVAL;
757 break;
758 case DFS_GET_THRESH:
759 if (!outdata || !outsize
760 || *outsize < sizeof(struct dfs_ioctl_params)) {
761 error = -EINVAL;
762 break;
763 }
764 *outsize = sizeof(struct dfs_ioctl_params);
765 dfsparams = (struct dfs_ioctl_params *)outdata;
766
767 /*
768 * Fetch the DFS thresholds using the internal representation.
769 */
770 (void)dfs_get_thresholds(ic, &peout);
771
772 /*
773 * Convert them to the dfs IOCTL representation.
774 */
775 ath_dfs_dfsparam_to_ioctlparam(&peout, dfsparams);
776 break;
777 case DFS_RADARDETECTS:
778 if (!outdata || !outsize || *outsize < sizeof(uint32_t)) {
779 error = -EINVAL;
780 break;
781 }
782 *outsize = sizeof(uint32_t);
783 *((uint32_t *) outdata) = dfs->ath_dfs_stats.num_radar_detects;
784 break;
785 case DFS_DISABLE_DETECT:
786 dfs->dfs_proc_phyerr &= ~DFS_RADAR_EN;
787 dfs->ic->ic_dfs_state.ignore_dfs = 1;
788 DFS_PRINTK("%s enable detects, ignore_dfs %d\n",
789 __func__, dfs->ic->ic_dfs_state.ignore_dfs);
790 break;
791 case DFS_ENABLE_DETECT:
792 dfs->dfs_proc_phyerr |= DFS_RADAR_EN;
793 dfs->ic->ic_dfs_state.ignore_dfs = 0;
794 DFS_PRINTK("%s enable detects, ignore_dfs %d\n",
795 __func__, dfs->ic->ic_dfs_state.ignore_dfs);
796 break;
797 case DFS_DISABLE_FFT:
798 /* UMACDFS: TODO: val = ath_hal_dfs_config_fft(sc->sc_ah, false); */
799 DFS_PRINTK("%s TODO disable FFT val=0x%x \n", __func__, val);
800 break;
801 case DFS_ENABLE_FFT:
802 /* UMACDFS TODO: val = ath_hal_dfs_config_fft(sc->sc_ah, true); */
803 DFS_PRINTK("%s TODO enable FFT val=0x%x \n", __func__, val);
804 break;
805 case DFS_SET_DEBUG_LEVEL:
806 if (insize < sizeof(uint32_t) || !indata) {
807 error = -EINVAL;
808 break;
809 }
810 dfs->dfs_debug_mask = *(uint32_t *) indata;
811 DFS_PRINTK("%s debug level now = 0x%x \n",
812 __func__, dfs->dfs_debug_mask);
813 if (dfs->dfs_debug_mask & ATH_DEBUG_DFS3) {
814 /* Enable debug Radar Event */
815 dfs->dfs_event_log_on = 1;
816 } else {
817 dfs->dfs_event_log_on = 0;
818 }
819 break;
820 case DFS_SET_FALSE_RSSI_THRES:
821 if (insize < sizeof(uint32_t) || !indata) {
822 error = -EINVAL;
823 break;
824 }
825 dfs->ath_dfs_false_rssi_thres = *(uint32_t *) indata;
826 DFS_PRINTK("%s false RSSI threshold now = 0x%x \n",
827 __func__, dfs->ath_dfs_false_rssi_thres);
828 break;
829 case DFS_SET_PEAK_MAG:
830 if (insize < sizeof(uint32_t) || !indata) {
831 error = -EINVAL;
832 break;
833 }
834 dfs->ath_dfs_peak_mag = *(uint32_t *) indata;
835 DFS_PRINTK("%s peak_mag now = 0x%x \n",
836 __func__, dfs->ath_dfs_peak_mag);
837 break;
838 case DFS_IGNORE_CAC:
839 if (insize < sizeof(uint32_t) || !indata) {
840 error = -EINVAL;
841 break;
842 }
843 if (*(uint32_t *) indata) {
844 dfs->ic->ic_dfs_state.ignore_cac = 1;
845 } else {
846 dfs->ic->ic_dfs_state.ignore_cac = 0;
847 }
848 DFS_PRINTK("%s ignore cac = 0x%x \n",
849 __func__, dfs->ic->ic_dfs_state.ignore_cac);
850 break;
851 case DFS_SET_NOL_TIMEOUT:
852 if (insize < sizeof(uint32_t) || !indata) {
853 error = -EINVAL;
854 break;
855 }
856 if (*(int *)indata) {
857 dfs->ath_dfs_nol_timeout = *(int *)indata;
858 } else {
859 dfs->ath_dfs_nol_timeout = DFS_NOL_TIMEOUT_S;
860 }
861 DFS_PRINTK("%s nol timeout = %d sec \n",
862 __func__, dfs->ath_dfs_nol_timeout);
863 break;
864#ifndef ATH_DFS_RADAR_DETECTION_ONLY
865 case DFS_MUTE_TIME:
866 if (insize < sizeof(uint32_t) || !indata) {
867 error = -EINVAL;
868 break;
869 }
870 data = (uint32_t *) indata;
871 dfs->ath_dfstesttime = *data;
872 dfs->ath_dfstesttime *= (1000); /* convert sec into ms */
873 break;
874 case DFS_GET_USENOL:
875 if (!outdata || !outsize || *outsize < sizeof(uint32_t)) {
876 error = -EINVAL;
877 break;
878 }
879 *outsize = sizeof(uint32_t);
880 *((uint32_t *) outdata) = dfs->dfs_rinfo.rn_use_nol;
881
882 for (i = 0;
883 (i < DFS_EVENT_LOG_SIZE) && (i < dfs->dfs_event_log_count);
884 i++) {
885 /* DFS_DPRINTK(sc, ATH_DEBUG_DFS,"ts=%llu diff_ts=%u rssi=%u dur=%u\n", dfs->radar_log[i].ts, dfs->radar_log[i].diff_ts, dfs->radar_log[i].rssi, dfs->radar_log[i].dur); */
886
887 }
888 dfs->dfs_event_log_count = 0;
889 dfs->dfs_phyerr_count = 0;
890 dfs->dfs_phyerr_reject_count = 0;
891 dfs->dfs_phyerr_queued_count = 0;
892 dfs->dfs_phyerr_freq_min = 0x7fffffff;
893 dfs->dfs_phyerr_freq_max = 0;
894 break;
895 case DFS_SET_USENOL:
896 if (insize < sizeof(uint32_t) || !indata) {
897 error = -EINVAL;
898 break;
899 }
900 dfs->dfs_rinfo.rn_use_nol = *(uint32_t *) indata;
901 /* iwpriv markdfs in linux can do the same thing... */
902 break;
903 case DFS_GET_NOL:
904 if (!outdata || !outsize
905 || *outsize < sizeof(struct dfsreq_nolinfo)) {
906 error = -EINVAL;
907 break;
908 }
909 *outsize = sizeof(struct dfsreq_nolinfo);
910 nol = (struct dfsreq_nolinfo *)outdata;
911 dfs_get_nol(dfs, (struct dfsreq_nolelem *)nol->dfs_nol,
912 &nol->ic_nchans);
913 dfs_print_nol(dfs);
914 break;
915 case DFS_SET_NOL:
916 if (insize < sizeof(struct dfsreq_nolinfo) || !indata) {
917 error = -EINVAL;
918 break;
919 }
920 nol = (struct dfsreq_nolinfo *)indata;
921 dfs_set_nol(dfs, (struct dfsreq_nolelem *)nol->dfs_nol,
922 nol->ic_nchans);
923 break;
924
925 case DFS_SHOW_NOL:
926 dfs_print_nol(dfs);
927 break;
928 case DFS_BANGRADAR:
929#if 0 /* MERGE_TBD */
930 if (sc->sc_nostabeacons) {
931 printk("No radar detection Enabled \n");
932 break;
933 }
934#endif
935 dfs->dfs_bangradar = 1;
936 dfs->ath_radar_tasksched = 1;
937 OS_SET_TIMER(&dfs->ath_dfs_task_timer, 0);
938 break;
939#endif /* ATH_DFS_RADAR_DETECTION_ONLY */
940 default:
941 error = -EINVAL;
942 }
943bad:
944 return error;
945}
946
/*
 * dfs_set_thresholds() - set one phy-error detection threshold.
 *
 * @threshtype: DFS_PARAM_* selector for the parameter to change
 * @value:      new value (stored as-is; FIRPWR is kept signed)
 *
 * Updates both the cached radar state for the current channel and the
 * hardware (via ic->ic_dfs_enable with a single-field phyerr param).
 * Returns 1 on success, 0 when DFS state is missing or no valid radar
 * state is bound to the current channel.
 */
int
dfs_set_thresholds(struct ieee80211com *ic, const uint32_t threshtype,
		   const uint32_t value)
{
	struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
	int16_t chanindex;
	struct dfs_state *rs;
	struct ath_dfs_phyerr_param pe;
	int is_fastclk = 0;     /* XXX throw-away */

	if (dfs == NULL) {
		DFS_DPRINTK(dfs, ATH_DEBUG_DFS1, "%s: ic_dfs is NULL\n",
			    __func__);
		return 0;
	}

	chanindex = dfs->dfs_curchan_radindex;
	if ((chanindex < 0) || (chanindex >= DFS_NUM_RADAR_STATES)) {
		DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
			    "%s: chanindex = %d, DFS_NUM_RADAR_STATES=%d\n",
			    __func__, chanindex, DFS_NUM_RADAR_STATES);
		return 0;
	}

	DFS_DPRINTK(dfs, ATH_DEBUG_DFS,
		    "%s: threshtype=%d, value=%d\n", __func__, threshtype,
		    value);

	/* Start from "no value set" so only the selected field is pushed. */
	ath_dfs_phyerr_init_noval(&pe);

	rs = &(dfs->dfs_radar[chanindex]);
	switch (threshtype) {
	case DFS_PARAM_FIRPWR:
		rs->rs_param.pe_firpwr = (int32_t) value;
		pe.pe_firpwr = value;
		break;
	case DFS_PARAM_RRSSI:
		rs->rs_param.pe_rrssi = value;
		pe.pe_rrssi = value;
		break;
	case DFS_PARAM_HEIGHT:
		rs->rs_param.pe_height = value;
		pe.pe_height = value;
		break;
	case DFS_PARAM_PRSSI:
		rs->rs_param.pe_prssi = value;
		pe.pe_prssi = value;
		break;
	case DFS_PARAM_INBAND:
		rs->rs_param.pe_inband = value;
		pe.pe_inband = value;
		break;
	/* 5413 specific */
	case DFS_PARAM_RELPWR:
		rs->rs_param.pe_relpwr = value;
		pe.pe_relpwr = value;
		break;
	case DFS_PARAM_RELSTEP:
		rs->rs_param.pe_relstep = value;
		pe.pe_relstep = value;
		break;
	case DFS_PARAM_MAXLEN:
		rs->rs_param.pe_maxlen = value;
		pe.pe_maxlen = value;
		break;
	default:
		DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
			    "%s: unknown threshtype (%d)\n",
			    __func__, threshtype);
		break;
	}

	/*
	 * The driver layer dfs_enable routine is tasked with translating
	 * values from the global format to the per-device (HAL, offload)
	 * format.
	 */
	ic->ic_dfs_enable(ic, &is_fastclk, &pe);
	return 1;
}
1027
1028int
1029dfs_get_thresholds(struct ieee80211com *ic, struct ath_dfs_phyerr_param *param)
1030{
1031 /* UMACDFS : TODO:ath_hal_getdfsthresh(sc->sc_ah, param); */
1032
1033 OS_MEMZERO(param, sizeof(*param));
1034
1035 (void)ic->ic_dfs_get_thresholds(ic, param);
1036
1037 return 1;
1038}
1039
1040uint16_t dfs_usenol(struct ieee80211com *ic)
1041{
1042 struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
1043 return dfs ? (uint16_t) dfs->dfs_rinfo.rn_use_nol : 0;
1044}
1045
1046uint16_t dfs_isdfsregdomain(struct ieee80211com *ic)
1047{
1048 struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
1049 return dfs ? dfs->dfsdomain : 0;
1050}
1051
#endif /* ATH_SUPPORT_DFS */