blob: a2b5cff32ac1fb5f24b3eb3b6a9ceb6f202a32b7 [file] [log] [blame]
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001/*
Anurag Chouhan6d760662016-02-20 16:05:43 +05302 * Copyright (c) 2002-2016 The Linux Foundation. All rights reserved.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28/*===========================================================================
29
Anurag Chouhan6d760662016-02-20 16:05:43 +053030 dfs.c
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080031
32 OVERVIEW:
33
34 Source code borrowed from QCA_MAIN DFS module
35
36 DEPENDENCIES:
37
38 Are listed for each API below.
39
40 ===========================================================================*/
41
42/*===========================================================================
43
Anurag Chouhan6d760662016-02-20 16:05:43 +053044 EDIT HISTORY FOR FILE
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080045
46 This section contains comments describing changes made to the module.
47 Notice that changes are listed in reverse chronological order.
48
49 when who what, where, why
50 ---------- --- --------------------------------------------------------
51
52 ===========================================================================*/
53
54#include <osdep.h>
55
56#ifndef ATH_SUPPORT_DFS
57#define ATH_SUPPORT_DFS 1
58
59/* #include "if_athioctl.h" */
60/* #include "if_athvar.h" */
61#include "dfs_ioctl.h"
62#include "dfs.h"
63
/* Regulatory DFS domain override; DFS_UNINIT_REGION means "not overridden". */
int domainoverride = DFS_UNINIT_REGION;

/*
** channel switch announcement (CSA)
** usenol=1 (default) make CSA and switch to a new channel on radar detect
** usenol=0, make CSA with next channel same as current on radar detect
** usenol=2, no CSA and stay on the same channel on radar detect
**/

int usenol = 1;
/* Default debug mask for DFS_DPRINTK; can be changed via DFS_SET_DEBUG_LEVEL. */
uint32_t dfs_debug_level = ATH_DEBUG_DFS;
75
#if 0 /* the code to call this is currently commented-out below */
/*
 * Mark a channel as having interference detected upon it.
 *
 * This adds the interference marker to both the primary and
 * extension channel.
 *
 * XXX TODO: make the NOL and channel interference logic a bit smarter
 * so only the channel with the radar event is marked, rather than
 * both the primary and extension.
 */
static void
dfs_channel_mark_radar(struct ath_dfs *dfs, struct dfs_ieee80211_channel *chan)
{
	struct ieee80211_channel_list chan_info;
	int i;

	/* chan->ic_flagext |= CHANNEL_INTERFERENCE; */

	/*
	 * If radar is detected in 40MHz mode, add both the primary and the
	 * extension channels to the NOL. chan is the channel data we return
	 * to the ath_dev layer which passes it on to the 80211 layer.
	 * As we want the AP to change channels and send out a CSA,
	 * we always pass back the primary channel data to the ath_dev layer.
	 */
	/* Only AP/IBSS modes maintain a NOL, and only when usenol == 1. */
	if ((dfs->dfs_rinfo.rn_use_nol == 1) &&
	    (dfs->ic->ic_opmode == IEEE80211_M_HOSTAP ||
	     dfs->ic->ic_opmode == IEEE80211_M_IBSS)) {
		chan_info.cl_nchans = 0;
		dfs->ic->ic_get_ext_chan_info(dfs->ic, &chan_info);

		/* Flag every channel the umac reports (primary + extension). */
		for (i = 0; i < chan_info.cl_nchans; i++) {
			if (chan_info.cl_channels[i] == NULL) {
				DFS_PRINTK("%s: NULL channel\n", __func__);
			} else {
				chan_info.cl_channels[i]->ic_flagext |=
					CHANNEL_INTERFERENCE;
				dfs_nol_addchan(dfs, chan_info.cl_channels[i],
						dfs->ath_dfs_nol_timeout);
			}
		}

		/*
		 * Update the umac/driver channels with the new NOL information.
		 */
		dfs_nol_update(dfs);
	}
}
#endif /* #if 0 */
126
/*
 * dfs_task() - deferred radar-event processing timer handler.
 *
 * Runs when the radar task timer (armed in dfs_control DFS_BANGRADAR, and
 * by the phyerr path elsewhere in this module) fires.  Pulls the ath_dfs
 * state out of the timer argument, processes queued radar events for the
 * current channel and, on a confirmed detect, either notifies the umac
 * (usenol == 1) or schedules a test-mode CSA back to the same channel
 * (usenol == 0).  Clears ath_radar_tasksched on exit so the task can be
 * re-armed.
 */
static os_timer_func(dfs_task)
{
	struct ieee80211com *ic;
	struct ath_dfs *dfs = NULL;

	OS_GET_TIMER_ARG(ic, struct ieee80211com *);
	dfs = (struct ath_dfs *)ic->ic_dfs;
	/*
	 * XXX no locking?!
	 */
	if (dfs_process_radarevent(dfs, ic->ic_curchan)) {
#ifndef ATH_DFS_RADAR_DETECTION_ONLY

		/*
		 * This marks the channel (and the extension channel, if HT40) as
		 * having seen a radar event. It marks CHAN_INTERFERENCE and
		 * will add it to the local NOL implementation.
		 *
		 * This is only done for 'usenol=1', as the other two modes
		 * don't do radar notification or CAC/CSA/NOL; it just notes
		 * there was a radar.
		 */

		if (dfs->dfs_rinfo.rn_use_nol == 1) {
			/* dfs_channel_mark_radar(dfs, ic->ic_curchan); */
		}
#endif /* ATH_DFS_RADAR_DETECTION_ONLY */

		/*
		 * This calls into the umac DFS code, which sets the umac related
		 * radar flags and begins the channel change machinery.
		 *
		 * XXX TODO: the umac NOL code isn't used, but IEEE80211_CHAN_RADAR
		 * still gets set. Since the umac NOL code isn't used, that flag
		 * is never cleared. This needs to be fixed. See EV 105776.
		 */
		if (dfs->dfs_rinfo.rn_use_nol == 1) {
			ic->ic_dfs_notify_radar(ic, ic->ic_curchan);
		} else if (dfs->dfs_rinfo.rn_use_nol == 0) {
			/*
			 * For the test mode, don't do a CSA here; but setup the
			 * test timer so we get a CSA _back_ to the original channel.
			 */
			OS_CANCEL_TIMER(&dfs->ath_dfstesttimer);
			dfs->ath_dfstest = 1;
			/* chan_lock guards ic_curchan against concurrent change */
			qdf_spin_lock_bh(&ic->chan_lock);
			dfs->ath_dfstest_ieeechan = ic->ic_curchan->ic_ieee;
			qdf_spin_unlock_bh(&ic->chan_lock);
			dfs->ath_dfstesttime = 1;	/* 1ms */
			OS_SET_TIMER(&dfs->ath_dfstesttimer,
				     dfs->ath_dfstesttime);
		}
	}
	/* Allow the radar task to be scheduled again. */
	dfs->ath_radar_tasksched = 0;
}
182
183static os_timer_func(dfs_testtimer_task)
184{
185 struct ieee80211com *ic;
186 struct ath_dfs *dfs = NULL;
187
188 OS_GET_TIMER_ARG(ic, struct ieee80211com *);
189 dfs = (struct ath_dfs *)ic->ic_dfs;
190
191 /* XXX no locking? */
192 dfs->ath_dfstest = 0;
193
194 /*
195 * Flip the channel back to the original channel.
196 * Make sure this is done properly with a CSA.
197 */
198 DFS_PRINTK("%s: go back to channel %d\n",
199 __func__, dfs->ath_dfstest_ieeechan);
200
201 /*
202 * XXX The mere existence of this method indirection
203 * to a umac function means this code belongs in
204 * the driver, _not_ here. Please fix this!
205 */
206 ic->ic_start_csa(ic, dfs->ath_dfstest_ieeechan);
207}
208
209static int dfs_get_debug_info(struct ieee80211com *ic, int type, void *data)
210{
211 struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
212 if (data) {
213 *(uint32_t *) data = dfs->dfs_proc_phyerr;
214 }
215 return (int)dfs->dfs_proc_phyerr;
216}
217
Krishna Kumaar Natarajan047e7d52016-02-11 16:30:02 -0800218/**
219 * dfs_alloc_mem_filter() - allocate memory for dfs ft_filters
220 * @radarf: pointer holding ft_filters
221 *
222 * Return: 0-success and 1-failure
223*/
224static int dfs_alloc_mem_filter(struct dfs_filtertype *radarf)
225{
226 int status = 0, n, i;
227
228 for (i = 0; i < DFS_MAX_NUM_RADAR_FILTERS; i++) {
229 radarf->ft_filters[i] = qdf_mem_malloc(sizeof(struct
230 dfs_filter));
231 if (NULL == radarf->ft_filters[i]) {
232 DFS_PRINTK("%s[%d]: mem alloc failed\n",
233 __func__, __LINE__);
234 status = 1;
235 goto error;
236 }
237 }
238
239 return status;
240
241error:
242 /* free up allocated memory */
243 for (n = 0; n < i; n++) {
244 if (radarf->ft_filters[n]) {
245 qdf_mem_free(radarf->ft_filters[n]);
246 radarf->ft_filters[i] = NULL;
247 }
248 }
249
250 DFS_PRINTK("%s[%d]: cannot allocate memory for radar filter types\n",
251 __func__, __LINE__);
252
253 return status;
254}
255
/*
 * dfs_attach() - allocate and initialize the per-device DFS state.
 *
 * Creates the struct ath_dfs hung off ic->ic_dfs, initializes the radar /
 * AR / event queues and timers, allocates the pulse lines, radar filter
 * types and the 256-entry radar lookup table, then seeds the runtime
 * thresholds and loads the initial radar filter configuration.
 *
 * Return: 0 on success; 1 on any failure (already attached, allocation
 * failure, or radar-filter initialization failure).  On allocation
 * failure all partially-allocated state is released and ic->ic_dfs is
 * reset to NULL.
 */
int dfs_attach(struct ieee80211com *ic)
{
	int i, n;
	struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
	struct ath_dfs_radar_tab_info radar_info;

	/* Already attached: treat as failure rather than re-initializing. */
	if (dfs != NULL) {
		/*DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
		   "%s: ic_dfs was not NULL\n",
		   __func__);
		 */
		return 1;
	}

	dfs = (struct ath_dfs *)os_malloc(NULL, sizeof(struct ath_dfs),
					  GFP_ATOMIC);

	if (dfs == NULL) {
		/*DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
		   "%s: ath_dfs allocation failed\n", __func__); */
		return 1;
	}

	OS_MEMZERO(dfs, sizeof(struct ath_dfs));

	ic->ic_dfs = (void *)dfs;

	dfs->ic = ic;

	ic->ic_dfs_debug = dfs_get_debug_info;
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
	dfs->dfs_nol = NULL;
#endif

	/*
	 * Zero out radar_info. It's possible that the attach function won't
	 * fetch an initial regulatory configuration; you really do want to
	 * ensure that the contents indicates there aren't any filters.
	 */
	OS_MEMZERO(&radar_info, sizeof(radar_info));
	ic->ic_dfs_attach(ic, &dfs->dfs_caps, &radar_info);
	dfs_clear_stats(ic);
	dfs->dfs_event_log_on = 0;
	/* Deferred radar processing (dfs_task) and test-mode CSA timers. */
	OS_INIT_TIMER(NULL, &(dfs->ath_dfs_task_timer), dfs_task, (void *)(ic),
		      QDF_TIMER_TYPE_SW);
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
	OS_INIT_TIMER(NULL, &(dfs->ath_dfstesttimer), dfs_testtimer_task,
		      (void *)ic, QDF_TIMER_TYPE_SW);
	dfs->ath_dfs_cac_time = ATH_DFS_WAIT_MS;
	dfs->ath_dfstesttime = ATH_DFS_TEST_RETURN_PERIOD_MS;
#endif
	ATH_DFSQ_LOCK_INIT(dfs);
	STAILQ_INIT(&dfs->dfs_radarq);
	ATH_ARQ_LOCK_INIT(dfs);
	STAILQ_INIT(&dfs->dfs_arq);
	STAILQ_INIT(&(dfs->dfs_eventq));
	ATH_DFSEVENTQ_LOCK_INIT(dfs);
	/* Pre-allocate the radar-event pool; all events start on the free q. */
	dfs->events = (struct dfs_event *)os_malloc(NULL,
						    sizeof(struct dfs_event) *
						    DFS_MAX_EVENTS, GFP_ATOMIC);
	if (dfs->events == NULL) {
		OS_FREE(dfs);
		ic->ic_dfs = NULL;
		DFS_PRINTK("%s: events allocation failed\n", __func__);
		return 1;
	}
	for (i = 0; i < DFS_MAX_EVENTS; i++) {
		STAILQ_INSERT_TAIL(&(dfs->dfs_eventq), &dfs->events[i],
				   re_list);
	}

	dfs->pulses =
		(struct dfs_pulseline *)os_malloc(NULL,
						  sizeof(struct dfs_pulseline),
						  GFP_ATOMIC);
	if (dfs->pulses == NULL) {
		OS_FREE(dfs->events);
		dfs->events = NULL;
		OS_FREE(dfs);
		ic->ic_dfs = NULL;
		DFS_PRINTK("%s: pulse buffer allocation failed\n", __func__);
		return 1;
	}

	/*
	 * If the chip supports DFS-3 then allocate
	 * memory for pulses for extension segment.
	 */
	if (ic->dfs_hw_bd_id != DFS_HWBD_QCA6174) {
		dfs->pulses_ext_seg = (struct dfs_pulseline *)
			os_malloc(NULL,
				  sizeof(struct dfs_pulseline),
				  GFP_ATOMIC);
		if (dfs->pulses_ext_seg == NULL) {
			OS_FREE(dfs->events);
			dfs->events = NULL;
			OS_FREE(dfs);
			ic->ic_dfs = NULL;
			/*
			 * NOTE(review): dfs->pulses is not freed on this
			 * path before dfs itself is freed via OS_FREE(dfs);
			 * looks like a leak of the primary pulse line —
			 * confirm against OS_FREE semantics.
			 */
			QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_ERROR,
				  "%s[%d]: pulse buffer allocation failed",
				  __func__, __LINE__);
			return 1;
		}
		dfs->pulses_ext_seg->pl_lastelem = DFS_MAX_PULSE_BUFFER_MASK;
	}

	dfs->pulses->pl_lastelem = DFS_MAX_PULSE_BUFFER_MASK;

	/* Allocate memory for radar filters */
	for (n = 0; n < DFS_MAX_RADAR_TYPES; n++) {
		dfs->dfs_radarf[n] = (struct dfs_filtertype *)
			os_malloc(NULL, sizeof(struct dfs_filtertype),
				  GFP_ATOMIC);
		if (dfs->dfs_radarf[n] == NULL) {
			DFS_PRINTK
				("%s: cannot allocate memory for radar filter types\n",
				__func__);
			goto bad1;
		} else {
			qdf_mem_zero(dfs->dfs_radarf[n],
				     sizeof(struct dfs_filtertype));
			if (0 != dfs_alloc_mem_filter(dfs->dfs_radarf[n]))
				goto bad1;
		}
	}
	/* Allocate memory for radar table */
	dfs->dfs_radartable =
		(int8_t **) os_malloc(NULL, 256 * sizeof(int8_t *), GFP_ATOMIC);
	if (dfs->dfs_radartable == NULL) {
		DFS_PRINTK("%s: cannot allocate memory for radar table\n",
			   __func__);
		goto bad1;
	}
	for (n = 0; n < 256; n++) {
		dfs->dfs_radartable[n] =
			os_malloc(NULL, DFS_MAX_RADAR_OVERLAP * sizeof(int8_t),
				  GFP_ATOMIC);
		if (dfs->dfs_radartable[n] == NULL) {
			DFS_PRINTK
				("%s: cannot allocate memory for radar table entry\n",
				__func__);
			goto bad2;
		}
	}

	if (usenol == 0)
		DFS_PRINTK("%s: NOL disabled\n", __func__);
	else if (usenol == 2)
		DFS_PRINTK("%s: NOL disabled; no CSA\n", __func__);

	dfs->dfs_rinfo.rn_use_nol = usenol;

	/* Init the cached extension channel busy for false alarm reduction */
	dfs->dfs_rinfo.ext_chan_busy_ts = ic->ic_get_TSF64(ic);
	dfs->dfs_rinfo.dfs_ext_chan_busy = 0;
	/* Init the Bin5 chirping related data */
	dfs->dfs_rinfo.dfs_bin5_chirp_ts = dfs->dfs_rinfo.ext_chan_busy_ts;
	dfs->dfs_rinfo.dfs_last_bin5_dur = MAX_BIN5_DUR;
	dfs->dfs_b5radars = NULL;
	if (ic->dfs_hw_bd_id != DFS_HWBD_QCA6174) {
		dfs->dfs_rinfo.dfs_last_bin5_dur_ext_seg = MAX_BIN5_DUR;
		dfs->dfs_b5radars_ext_seg = NULL;
	}

	/*
	 * If dfs_init_radar_filters() fails, we can abort here and
	 * reconfigure when the first valid channel + radar config
	 * is available.
	 */
	if (dfs_init_radar_filters(ic, &radar_info)) {
		DFS_PRINTK(" %s: Radar Filter Intialization Failed \n",
			   __func__);
		return 1;
	}

	dfs->ath_dfs_false_rssi_thres = RSSI_POSSIBLY_FALSE;
	dfs->ath_dfs_peak_mag = SEARCH_FFT_REPORT_PEAK_MAG_THRSH;
	dfs->dfs_phyerr_freq_min = 0x7fffffff;
	dfs->dfs_phyerr_freq_max = 0;
	dfs->dfs_phyerr_queued_count = 0;
	dfs->dfs_phyerr_w53_counter = 0;
	dfs->dfs_pri_multiplier = 2;

	dfs->ath_dfs_nol_timeout = DFS_NOL_TIMEOUT_S;
	return 0;

bad2:
	OS_FREE(dfs->dfs_radartable);
	dfs->dfs_radartable = NULL;
bad1:
	/*
	 * NOTE(review): the per-type ft_filters allocated by
	 * dfs_alloc_mem_filter() are not released here (only the
	 * dfs_filtertype containers are freed) — compare dfs_detach(),
	 * which calls dfs_free_filter() first.  Looks like a leak on the
	 * error path; confirm.
	 */
	for (n = 0; n < DFS_MAX_RADAR_TYPES; n++) {
		if (dfs->dfs_radarf[n] != NULL) {
			OS_FREE(dfs->dfs_radarf[n]);
			dfs->dfs_radarf[n] = NULL;
		}
	}
	if (dfs->pulses) {
		OS_FREE(dfs->pulses);
		dfs->pulses = NULL;
	}
	if (dfs->pulses_ext_seg &&
	    ic->dfs_hw_bd_id != DFS_HWBD_QCA6174) {
		OS_FREE(dfs->pulses_ext_seg);
		dfs->pulses_ext_seg = NULL;
	}
	if (dfs->events) {
		OS_FREE(dfs->events);
		dfs->events = NULL;
	}

	if (ic->ic_dfs) {
		OS_FREE(ic->ic_dfs);
		ic->ic_dfs = NULL;
	}
	return 1;
/* NOTE(review): no matching #define N is visible; leftover directive? */
#undef N
}
473
Krishna Kumaar Natarajan047e7d52016-02-11 16:30:02 -0800474/**
475 * dfs_free_filter() - free memory allocated for dfs ft_filters
476 * @radarf: pointer holding ft_filters
477 *
478 * Return: NA
479*/
480static void dfs_free_filter(struct dfs_filtertype *radarf)
481{
482 int i;
483
484 for (i = 0; i < DFS_MAX_NUM_RADAR_FILTERS; i++) {
485 if (radarf->ft_filters[i]) {
486 qdf_mem_free(radarf->ft_filters[i]);
487 radarf->ft_filters[i] = NULL;
488 }
489 }
490}
/*
 * dfs_detach() - tear down all DFS state created by dfs_attach().
 *
 * Cancels outstanding timers, drains the radar queue and delay lines,
 * frees pulse lines, radar filters, the radar table, bin5 state and the
 * event pool, cleans up NOL timers, then frees the ath_dfs itself and
 * clears ic->ic_dfs.  Safe to call when ic_dfs is already NULL.
 */
void dfs_detach(struct ieee80211com *ic)
{
	struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
	int n, empty;

	if (dfs == NULL) {
		DFS_DPRINTK(dfs, ATH_DEBUG_DFS1, "%s: ic_dfs is NULL\n",
			    __func__);
		return;
	}

	/* Bug 29099 make sure all outstanding timers are cancelled */

	if (dfs->ath_radar_tasksched) {
		OS_CANCEL_TIMER(&dfs->ath_dfs_task_timer);
		dfs->ath_radar_tasksched = 0;
	}

	if (dfs->ath_dfstest) {
		OS_CANCEL_TIMER(&dfs->ath_dfstesttimer);
		dfs->ath_dfstest = 0;
	}
#if 0
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
	if (dfs->ic_dfswait) {
		OS_CANCEL_TIMER(&dfs->ic_dfswaittimer);
		dfs->ath_dfswait = 0;
	}

	OS_CANCEL_TIMER(&dfs->sc_dfs_war_timer);
	if (dfs->dfs_nol != NULL) {
		struct dfs_nolelem *nol, *next;
		nol = dfs->dfs_nol;
		/* Bug 29099 - each NOL element has its own timer, cancel it and
		   free the element */
		while (nol != NULL) {
			OS_CANCEL_TIMER(&nol->nol_timer);
			next = nol->nol_next;
			OS_FREE(nol);
			nol = next;
		}
		dfs->dfs_nol = NULL;
	}
#endif
#endif

	/* Return radar events to free q */
	dfs_reset_radarq(dfs);
	dfs_reset_alldelaylines(dfs, DFS_80P80_SEG0);
	/* Extension-segment delay lines exist only on non-QCA6174 boards. */
	if (ic->dfs_hw_bd_id != DFS_HWBD_QCA6174)
		dfs_reset_alldelaylines(dfs, DFS_80P80_SEG1);

	/* Free up pulse log */
	if (dfs->pulses != NULL) {
		OS_FREE(dfs->pulses);
		dfs->pulses = NULL;
	}

	if (dfs->pulses_ext_seg != NULL) {
		OS_FREE(dfs->pulses_ext_seg);
		dfs->pulses_ext_seg = NULL;
	}

	/* Free the inner ft_filters before freeing each filter type. */
	for (n = 0; n < DFS_MAX_RADAR_TYPES; n++) {
		if (dfs->dfs_radarf[n] != NULL) {
			dfs_free_filter(dfs->dfs_radarf[n]);
			OS_FREE(dfs->dfs_radarf[n]);
			dfs->dfs_radarf[n] = NULL;
		}
	}

	if (dfs->dfs_radartable != NULL) {
		for (n = 0; n < 256; n++) {
			if (dfs->dfs_radartable[n] != NULL) {
				OS_FREE(dfs->dfs_radartable[n]);
				dfs->dfs_radartable[n] = NULL;
			}
		}
		OS_FREE(dfs->dfs_radartable);
		dfs->dfs_radartable = NULL;
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
		dfs->ath_dfs_isdfsregdomain = 0;
#endif
	}

	if (dfs->dfs_b5radars != NULL) {
		OS_FREE(dfs->dfs_b5radars);
		dfs->dfs_b5radars = NULL;
	}
	if (dfs->dfs_b5radars_ext_seg != NULL) {
		OS_FREE(dfs->dfs_b5radars_ext_seg);
		dfs->dfs_b5radars_ext_seg = NULL;
	}

/* Commenting out since all the ar functions are obsolete and
 * the function definition has been removed as part of dfs_ar.c
 * dfs_reset_ar(dfs);
 */
	ATH_ARQ_LOCK(dfs);
	empty = STAILQ_EMPTY(&(dfs->dfs_arq));
	ATH_ARQ_UNLOCK(dfs);
	if (!empty) {
/*
 * Commenting out since all the ar functions are obsolete and
 * the function definition has been removed as part of dfs_ar.c
 *
 * dfs_reset_arq(dfs);
 */
	}
	if (dfs->events != NULL) {
		OS_FREE(dfs->events);
		dfs->events = NULL;
	}
	dfs_nol_timer_cleanup(dfs);
	OS_FREE(dfs);

	/* XXX? */
	ic->ic_dfs = NULL;
}
610
611/*
612 * This is called each time a channel change occurs, to (potentially) enable
613 * the radar code.
614 */
615int dfs_radar_disable(struct ieee80211com *ic)
616{
617 struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
618#ifdef ATH_ENABLE_AR
619 dfs->dfs_proc_phyerr &= ~DFS_AR_EN;
620#endif
621 dfs->dfs_proc_phyerr &= ~DFS_RADAR_EN;
622 return 0;
623}
624
625/*
626 * This is called each time a channel change occurs, to (potentially) enable
627 * the radar code.
628 */
/*
 * dfs_radar_enable() - (re)enable radar detection for the current channel.
 *
 * Called on channel change.  Disables detection in hardware, re-initializes
 * the radar filter set for the (possibly changed) DFS domain, and — for
 * AP/IBSS on a DFS channel — resets the delay lines, looks up the per-channel
 * radar state (primary and, for HT40, extension), pushes the channel's
 * phyerr thresholds to the hardware and sets the pulse-duration multiplier
 * based on the fast-clock flag returned by ic_dfs_enable().
 *
 * Return: DFS_STATUS_SUCCESS on success, -EIO when DFS state is missing or
 * filter initialization fails.
 */
int dfs_radar_enable(struct ieee80211com *ic,
		     struct ath_dfs_radar_tab_info *radar_info)
{
	int is_ext_ch;
	int is_fastclk = 0;
	int radar_filters_init_status = 0;
	/* uint32_t rfilt; */
	struct ath_dfs *dfs;
	struct dfs_state *rs_pri, *rs_ext;
	struct dfs_ieee80211_channel *chan = ic->ic_curchan, *ext_ch = NULL;
	is_ext_ch = IEEE80211_IS_CHAN_11N_HT40(ic->ic_curchan);
	dfs = (struct ath_dfs *)ic->ic_dfs;
	rs_pri = NULL;
	rs_ext = NULL;
#if 0
	int i;
#endif
	if (dfs == NULL) {
		DFS_DPRINTK(dfs, ATH_DEBUG_DFS, "%s: ic_dfs is NULL\n",
			    __func__);

		return -EIO;
	}
	/* Quiesce hardware detection while we reconfigure. */
	ic->ic_dfs_disable(ic);

	/*
	 * Setting country code might change the DFS domain
	 * so initialize the DFS Radar filters
	 */
	radar_filters_init_status = dfs_init_radar_filters(ic, radar_info);

	/*
	 * dfs_init_radar_filters() returns 1 on failure and
	 * 0 on success.
	 */
	if (DFS_STATUS_FAIL == radar_filters_init_status) {
		QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_ERROR,
			  "%s[%d]: DFS Radar Filters Initialization Failed",
			  __func__, __LINE__);
		return -EIO;
	}

	/* Detection is only armed for AP/IBSS on a DFS channel. */
	if ((ic->ic_opmode == IEEE80211_M_HOSTAP
	     || ic->ic_opmode == IEEE80211_M_IBSS)) {

		if (IEEE80211_IS_CHAN_DFS(chan)) {

			uint8_t index_pri, index_ext;
#ifdef ATH_ENABLE_AR
			dfs->dfs_proc_phyerr |= DFS_AR_EN;
#endif
			dfs->dfs_proc_phyerr |= DFS_RADAR_EN;

			if (is_ext_ch) {
				ext_ch = ieee80211_get_extchan(ic);
			}
			dfs_reset_alldelaylines(dfs, DFS_80P80_SEG0);
			/*
			 * Extension segment delaylines will be
			 * enabled only when SAP operates in 80p80
			 * and both the channels are DFS.
			 */
			if (chan->ic_80p80_both_dfs)
				dfs_reset_alldelaylines(dfs, DFS_80P80_SEG1);

			rs_pri = dfs_getchanstate(dfs, &index_pri, 0);
			if (ext_ch) {
				rs_ext = dfs_getchanstate(dfs, &index_ext, 1);
			}
			if (rs_pri != NULL
			    && ((ext_ch == NULL) || (rs_ext != NULL))) {
				struct ath_dfs_phyerr_param pe;

				OS_MEMSET(&pe, '\0', sizeof(pe));

				/* Channel changed: start delay lines fresh. */
				if (index_pri != dfs->dfs_curchan_radindex) {
					dfs_reset_alldelaylines(dfs,
								DFS_80P80_SEG0);
					/*
					 * Reset only when ext segment is
					 * present
					 */
					if (chan->ic_80p80_both_dfs)
						dfs_reset_alldelaylines(dfs,
									DFS_80P80_SEG1);
				}
				dfs->dfs_curchan_radindex = (int16_t) index_pri;
				dfs->dfs_pri_multiplier_ini =
					radar_info->dfs_pri_multiplier;

				if (rs_ext)
					dfs->dfs_extchan_radindex =
						(int16_t) index_ext;

				ath_dfs_phyerr_param_copy(&pe,
							  &rs_pri->rs_param);
				DFS_DPRINTK(dfs, ATH_DEBUG_DFS3,
					    "%s: firpwr=%d, rssi=%d, height=%d, "
					    "prssi=%d, inband=%d, relpwr=%d, "
					    "relstep=%d, maxlen=%d\n",
					    __func__,
					    pe.pe_firpwr,
					    pe.pe_rrssi,
					    pe.pe_height,
					    pe.pe_prssi,
					    pe.pe_inband,
					    pe.pe_relpwr,
					    pe.pe_relstep, pe.pe_maxlen);

				/* Push thresholds; learn the clock rate. */
				ic->ic_dfs_enable(ic, &is_fastclk, &pe);
				DFS_DPRINTK(dfs, ATH_DEBUG_DFS,
					    "Enabled radar detection on channel %d\n",
					    chan->ic_freq);
				dfs->dur_multiplier =
					is_fastclk ? DFS_FAST_CLOCK_MULTIPLIER :
					DFS_NO_FAST_CLOCK_MULTIPLIER;
				DFS_DPRINTK(dfs, ATH_DEBUG_DFS3,
					    "%s: duration multiplier is %d\n",
					    __func__, dfs->dur_multiplier);
			} else
				DFS_DPRINTK(dfs, ATH_DEBUG_DFS,
					    "%s: No more radar states left\n",
					    __func__);
		}
	}

	return DFS_STATUS_SUCCESS;
}
757
/*
 * dfs_control() - DFS ioctl dispatcher.
 *
 * @id selects the operation; @indata/@insize carry the request payload and
 * @outdata/@outsize the response.  Every input case validates insize and a
 * non-NULL indata before use; every output case validates outdata/outsize
 * and writes the actual size back through *outsize.
 *
 * Return: 0 on success, -EINVAL on bad arguments or unknown id.
 */
int
dfs_control(struct ieee80211com *ic, u_int id,
	    void *indata, uint32_t insize, void *outdata, uint32_t *outsize)
{
	int error = 0;
	struct ath_dfs_phyerr_param peout;
	struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
	struct dfs_ioctl_params *dfsparams;
	uint32_t val = 0;
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
	struct dfsreq_nolinfo *nol;
	uint32_t *data = NULL;
#endif /* ATH_DFS_RADAR_DETECTION_ONLY */
	int i;

	if (dfs == NULL) {
		error = -EINVAL;
		DFS_DPRINTK(dfs, ATH_DEBUG_DFS1, "%s DFS is null\n", __func__);
		goto bad;
	}

	switch (id) {
	case DFS_SET_THRESH:
		/* Apply all eight phyerr thresholds from the ioctl params. */
		if (insize < sizeof(struct dfs_ioctl_params) || !indata) {
			DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
				    "%s: insize=%d, expected=%zu bytes, indata=%p\n",
				    __func__, insize,
				    sizeof(struct dfs_ioctl_params), indata);
			error = -EINVAL;
			break;
		}
		dfsparams = (struct dfs_ioctl_params *)indata;
		if (!dfs_set_thresholds
			    (ic, DFS_PARAM_FIRPWR, dfsparams->dfs_firpwr))
			error = -EINVAL;
		if (!dfs_set_thresholds
			    (ic, DFS_PARAM_RRSSI, dfsparams->dfs_rrssi))
			error = -EINVAL;
		if (!dfs_set_thresholds
			    (ic, DFS_PARAM_HEIGHT, dfsparams->dfs_height))
			error = -EINVAL;
		if (!dfs_set_thresholds
			    (ic, DFS_PARAM_PRSSI, dfsparams->dfs_prssi))
			error = -EINVAL;
		if (!dfs_set_thresholds
			    (ic, DFS_PARAM_INBAND, dfsparams->dfs_inband))
			error = -EINVAL;
		/* 5413 speicfic */
		if (!dfs_set_thresholds
			    (ic, DFS_PARAM_RELPWR, dfsparams->dfs_relpwr))
			error = -EINVAL;
		if (!dfs_set_thresholds
			    (ic, DFS_PARAM_RELSTEP, dfsparams->dfs_relstep))
			error = -EINVAL;
		if (!dfs_set_thresholds
			    (ic, DFS_PARAM_MAXLEN, dfsparams->dfs_maxlen))
			error = -EINVAL;
		break;
	case DFS_GET_THRESH:
		if (!outdata || !outsize
		    || *outsize < sizeof(struct dfs_ioctl_params)) {
			error = -EINVAL;
			break;
		}
		*outsize = sizeof(struct dfs_ioctl_params);
		dfsparams = (struct dfs_ioctl_params *)outdata;

		/*
		 * Fetch the DFS thresholds using the internal representation.
		 */
		(void)dfs_get_thresholds(ic, &peout);

		/*
		 * Convert them to the dfs IOCTL representation.
		 */
		ath_dfs_dfsparam_to_ioctlparam(&peout, dfsparams);
		break;
	case DFS_RADARDETECTS:
		if (!outdata || !outsize || *outsize < sizeof(uint32_t)) {
			error = -EINVAL;
			break;
		}
		*outsize = sizeof(uint32_t);
		*((uint32_t *) outdata) = dfs->ath_dfs_stats.num_radar_detects;
		break;
	case DFS_DISABLE_DETECT:
		dfs->dfs_proc_phyerr &= ~DFS_RADAR_EN;
		dfs->ic->ic_dfs_state.ignore_dfs = 1;
		/*
		 * NOTE(review): this log says "enable detects" in the
		 * DISABLE case — looks like a copy-paste of the message
		 * below; confirm before changing the string.
		 */
		DFS_PRINTK("%s enable detects, ignore_dfs %d\n",
			   __func__, dfs->ic->ic_dfs_state.ignore_dfs);
		break;
	case DFS_ENABLE_DETECT:
		dfs->dfs_proc_phyerr |= DFS_RADAR_EN;
		dfs->ic->ic_dfs_state.ignore_dfs = 0;
		DFS_PRINTK("%s enable detects, ignore_dfs %d\n",
			   __func__, dfs->ic->ic_dfs_state.ignore_dfs);
		break;
	case DFS_DISABLE_FFT:
		/* UMACDFS: TODO: val = ath_hal_dfs_config_fft(sc->sc_ah, false); */
		DFS_PRINTK("%s TODO disable FFT val=0x%x \n", __func__, val);
		break;
	case DFS_ENABLE_FFT:
		/* UMACDFS TODO: val = ath_hal_dfs_config_fft(sc->sc_ah, true); */
		DFS_PRINTK("%s TODO enable FFT val=0x%x \n", __func__, val);
		break;
	case DFS_SET_DEBUG_LEVEL:
		if (insize < sizeof(uint32_t) || !indata) {
			error = -EINVAL;
			break;
		}
		dfs->dfs_debug_mask = *(uint32_t *) indata;
		DFS_PRINTK("%s debug level now = 0x%x \n",
			   __func__, dfs->dfs_debug_mask);
		if (dfs->dfs_debug_mask & ATH_DEBUG_DFS3) {
			/* Enable debug Radar Event */
			dfs->dfs_event_log_on = 1;
		} else {
			dfs->dfs_event_log_on = 0;
		}
		break;
	case DFS_SET_FALSE_RSSI_THRES:
		if (insize < sizeof(uint32_t) || !indata) {
			error = -EINVAL;
			break;
		}
		dfs->ath_dfs_false_rssi_thres = *(uint32_t *) indata;
		DFS_PRINTK("%s false RSSI threshold now = 0x%x \n",
			   __func__, dfs->ath_dfs_false_rssi_thres);
		break;
	case DFS_SET_PEAK_MAG:
		if (insize < sizeof(uint32_t) || !indata) {
			error = -EINVAL;
			break;
		}
		dfs->ath_dfs_peak_mag = *(uint32_t *) indata;
		DFS_PRINTK("%s peak_mag now = 0x%x \n",
			   __func__, dfs->ath_dfs_peak_mag);
		break;
	case DFS_IGNORE_CAC:
		if (insize < sizeof(uint32_t) || !indata) {
			error = -EINVAL;
			break;
		}
		if (*(uint32_t *) indata) {
			dfs->ic->ic_dfs_state.ignore_cac = 1;
		} else {
			dfs->ic->ic_dfs_state.ignore_cac = 0;
		}
		DFS_PRINTK("%s ignore cac = 0x%x \n",
			   __func__, dfs->ic->ic_dfs_state.ignore_cac);
		break;
	case DFS_SET_NOL_TIMEOUT:
		if (insize < sizeof(uint32_t) || !indata) {
			error = -EINVAL;
			break;
		}
		/* Zero means "restore the default NOL timeout". */
		if (*(int *)indata) {
			dfs->ath_dfs_nol_timeout = *(int *)indata;
		} else {
			dfs->ath_dfs_nol_timeout = DFS_NOL_TIMEOUT_S;
		}
		DFS_PRINTK("%s nol timeout = %d sec \n",
			   __func__, dfs->ath_dfs_nol_timeout);
		break;
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
	case DFS_MUTE_TIME:
		if (insize < sizeof(uint32_t) || !indata) {
			error = -EINVAL;
			break;
		}
		data = (uint32_t *) indata;
		dfs->ath_dfstesttime = *data;
		dfs->ath_dfstesttime *= (1000); /* convert sec into ms */
		break;
	case DFS_GET_USENOL:
		if (!outdata || !outsize || *outsize < sizeof(uint32_t)) {
			error = -EINVAL;
			break;
		}
		*outsize = sizeof(uint32_t);
		*((uint32_t *) outdata) = dfs->dfs_rinfo.rn_use_nol;

		/* Side effect: drain the event log and reset phyerr stats. */
		for (i = 0;
		     (i < DFS_EVENT_LOG_SIZE) && (i < dfs->dfs_event_log_count);
		     i++) {
			/* DFS_DPRINTK(sc, ATH_DEBUG_DFS,"ts=%llu diff_ts=%u rssi=%u dur=%u\n", dfs->radar_log[i].ts, dfs->radar_log[i].diff_ts, dfs->radar_log[i].rssi, dfs->radar_log[i].dur); */

		}
		dfs->dfs_event_log_count = 0;
		dfs->dfs_phyerr_count = 0;
		dfs->dfs_phyerr_reject_count = 0;
		dfs->dfs_phyerr_queued_count = 0;
		dfs->dfs_phyerr_freq_min = 0x7fffffff;
		dfs->dfs_phyerr_freq_max = 0;
		break;
	case DFS_SET_USENOL:
		if (insize < sizeof(uint32_t) || !indata) {
			error = -EINVAL;
			break;
		}
		dfs->dfs_rinfo.rn_use_nol = *(uint32_t *) indata;
		/* iwpriv markdfs in linux can do the same thing... */
		break;
	case DFS_GET_NOL:
		if (!outdata || !outsize
		    || *outsize < sizeof(struct dfsreq_nolinfo)) {
			error = -EINVAL;
			break;
		}
		*outsize = sizeof(struct dfsreq_nolinfo);
		nol = (struct dfsreq_nolinfo *)outdata;
		dfs_get_nol(dfs, (struct dfsreq_nolelem *)nol->dfs_nol,
			    &nol->ic_nchans);
		dfs_print_nol(dfs);
		break;
	case DFS_SET_NOL:
		if (insize < sizeof(struct dfsreq_nolinfo) || !indata) {
			error = -EINVAL;
			break;
		}
		nol = (struct dfsreq_nolinfo *)indata;
		dfs_set_nol(dfs, (struct dfsreq_nolelem *)nol->dfs_nol,
			    nol->ic_nchans);
		break;

	case DFS_SHOW_NOL:
		dfs_print_nol(dfs);
		break;
	case DFS_BANGRADAR:
		/* Inject a fake radar event and schedule dfs_task now. */
#if 0 /* MERGE_TBD */
		if (sc->sc_nostabeacons) {
			printk("No radar detection Enabled \n");
			break;
		}
#endif
		dfs->dfs_bangradar = 1;
		dfs->ath_radar_tasksched = 1;
		OS_SET_TIMER(&dfs->ath_dfs_task_timer, 0);
		break;
#endif /* ATH_DFS_RADAR_DETECTION_ONLY */
	default:
		error = -EINVAL;
	}
bad:
	return error;
}
1004
/*
 * dfs_set_thresholds() - set one phyerr threshold for the current channel.
 *
 * Updates the cached per-channel radar state for @threshtype with @value
 * and pushes the single changed parameter to the hardware/firmware via
 * ic_dfs_enable() (all other fields are left as "no value" by
 * ath_dfs_phyerr_init_noval()).
 *
 * Return: 1 on success, 0 when DFS state is missing, the current channel
 * index is out of range, or @threshtype is unknown (note: unknown types
 * still return 1 after logging — only the early checks return 0).
 */
int
dfs_set_thresholds(struct ieee80211com *ic, const uint32_t threshtype,
		   const uint32_t value)
{
	struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
	int16_t chanindex;
	struct dfs_state *rs;
	struct ath_dfs_phyerr_param pe;
	int is_fastclk = 0;     /* XXX throw-away */

	if (dfs == NULL) {
		DFS_DPRINTK(dfs, ATH_DEBUG_DFS1, "%s: ic_dfs is NULL\n",
			    __func__);
		return 0;
	}

	/* A valid radar-state index must have been set by dfs_radar_enable. */
	chanindex = dfs->dfs_curchan_radindex;
	if ((chanindex < 0) || (chanindex >= DFS_NUM_RADAR_STATES)) {
		DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
			    "%s: chanindex = %d, DFS_NUM_RADAR_STATES=%d\n",
			    __func__, chanindex, DFS_NUM_RADAR_STATES);
		return 0;
	}

	DFS_DPRINTK(dfs, ATH_DEBUG_DFS,
		    "%s: threshtype=%d, value=%d\n", __func__, threshtype,
		    value);

	ath_dfs_phyerr_init_noval(&pe);

	rs = &(dfs->dfs_radar[chanindex]);
	/* Mirror the value into both the cached state and the pe update. */
	switch (threshtype) {
	case DFS_PARAM_FIRPWR:
		rs->rs_param.pe_firpwr = (int32_t) value;
		pe.pe_firpwr = value;
		break;
	case DFS_PARAM_RRSSI:
		rs->rs_param.pe_rrssi = value;
		pe.pe_rrssi = value;
		break;
	case DFS_PARAM_HEIGHT:
		rs->rs_param.pe_height = value;
		pe.pe_height = value;
		break;
	case DFS_PARAM_PRSSI:
		rs->rs_param.pe_prssi = value;
		pe.pe_prssi = value;
		break;
	case DFS_PARAM_INBAND:
		rs->rs_param.pe_inband = value;
		pe.pe_inband = value;
		break;
	/* 5413 specific */
	case DFS_PARAM_RELPWR:
		rs->rs_param.pe_relpwr = value;
		pe.pe_relpwr = value;
		break;
	case DFS_PARAM_RELSTEP:
		rs->rs_param.pe_relstep = value;
		pe.pe_relstep = value;
		break;
	case DFS_PARAM_MAXLEN:
		rs->rs_param.pe_maxlen = value;
		pe.pe_maxlen = value;
		break;
	default:
		DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
			    "%s: unknown threshtype (%d)\n",
			    __func__, threshtype);
		break;
	}

	/*
	 * The driver layer dfs_enable routine is tasked with translating
	 * values from the global format to the per-device (HAL, offload)
	 * format.
	 */
	ic->ic_dfs_enable(ic, &is_fastclk, &pe);
	return 1;
}
1085
1086int
1087dfs_get_thresholds(struct ieee80211com *ic, struct ath_dfs_phyerr_param *param)
1088{
1089 /* UMACDFS : TODO:ath_hal_getdfsthresh(sc->sc_ah, param); */
1090
1091 OS_MEMZERO(param, sizeof(*param));
1092
1093 (void)ic->ic_dfs_get_thresholds(ic, param);
1094
1095 return 1;
1096}
1097
1098uint16_t dfs_usenol(struct ieee80211com *ic)
1099{
1100 struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
1101 return dfs ? (uint16_t) dfs->dfs_rinfo.rn_use_nol : 0;
1102}
1103
1104uint16_t dfs_isdfsregdomain(struct ieee80211com *ic)
1105{
1106 struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
1107 return dfs ? dfs->dfsdomain : 0;
1108}
1109
 1110#endif /* ATH_SUPPORT_DFS */