blob: 58568f2bf790084cad2a9b9f6234e5cbb6f0f606 [file] [log] [blame]
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001/*
Anurag Chouhan6d760662016-02-20 16:05:43 +05302 * Copyright (c) 2002-2016 The Linux Foundation. All rights reserved.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28/*===========================================================================
29
Anurag Chouhan6d760662016-02-20 16:05:43 +053030 dfs.c
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080031
32 OVERVIEW:
33
34 Source code borrowed from QCA_MAIN DFS module
35
36 DEPENDENCIES:
37
38 Are listed for each API below.
39
40 ===========================================================================*/
41
42/*===========================================================================
43
Anurag Chouhan6d760662016-02-20 16:05:43 +053044 EDIT HISTORY FOR FILE
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080045
46 This section contains comments describing changes made to the module.
47 Notice that changes are listed in reverse chronological order.
48
49 when who what, where, why
50 ---------- --- --------------------------------------------------------
51
52 ===========================================================================*/
53
54#include <osdep.h>
55
56#ifndef ATH_SUPPORT_DFS
57#define ATH_SUPPORT_DFS 1
58
59/* #include "if_athioctl.h" */
60/* #include "if_athvar.h" */
61#include "dfs_ioctl.h"
62#include "dfs.h"
63
/* Regulatory-domain override; DFS_UNINIT_DOMAIN means "no override"
 * (use whatever domain the attach/regulatory path reports). */
int domainoverride = DFS_UNINIT_DOMAIN;

/*
** channel switch announcement (CSA)
** usenol=1 (default) make CSA and switch to a new channel on radar detect
** usenol=0, make CSA with next channel same as current on radar detect
** usenol=2, no CSA and stay on the same channel on radar detect
**/

int usenol = 1;
/* Default debug mask consumed by DFS_DPRINTK()/DFS_PRINTK() output. */
uint32_t dfs_debug_level = ATH_DEBUG_DFS;
75
76#if 0 /* the code to call this is curently commented-out below */
77/*
78 * Mark a channel as having interference detected upon it.
79 *
80 * This adds the interference marker to both the primary and
81 * extension channel.
82 *
83 * XXX TODO: make the NOL and channel interference logic a bit smarter
84 * so only the channel with the radar event is marked, rather than
85 * both the primary and extension.
86 */
/*
 * dfs_channel_mark_radar() - mark @chan as having radar interference.
 *
 * Flags CHANNEL_INTERFERENCE on, and NOL-lists, every channel reported
 * by ic_get_ext_chan_info() (primary plus extension — see the XXX note
 * above about only marking the channel that actually saw the event).
 * Acts only when the NOL is in use (rn_use_nol == 1) and the device is
 * in AP or IBSS mode.  Currently compiled out (#if 0); the call site in
 * dfs_task() is commented as well.
 */
static void
dfs_channel_mark_radar(struct ath_dfs *dfs, struct dfs_ieee80211_channel *chan)
{
	struct ieee80211_channel_list chan_info;
	int i;

	/* chan->ic_flagext |= CHANNEL_INTERFERENCE; */

	/*
	 * If radar is detected in 40MHz mode, add both the primary and the
	 * extension channels to the NOL. chan is the channel data we return
	 * to the ath_dev layer which passes it on to the 80211 layer.
	 * As we want the AP to change channels and send out a CSA,
	 * we always pass back the primary channel data to the ath_dev layer.
	 */
	if ((dfs->dfs_rinfo.rn_use_nol == 1) &&
	    (dfs->ic->ic_opmode == IEEE80211_M_HOSTAP ||
	     dfs->ic->ic_opmode == IEEE80211_M_IBSS)) {
		chan_info.cl_nchans = 0;
		dfs->ic->ic_get_ext_chan_info(dfs->ic, &chan_info);

		for (i = 0; i < chan_info.cl_nchans; i++) {
			if (chan_info.cl_channels[i] == NULL) {
				DFS_PRINTK("%s: NULL channel\n", __func__);
			} else {
				chan_info.cl_channels[i]->ic_flagext |=
					CHANNEL_INTERFERENCE;
				dfs_nol_addchan(dfs, chan_info.cl_channels[i],
						dfs->ath_dfs_nol_timeout);
			}
		}

		/*
		 * Update the umac/driver channels with the new NOL information.
		 */
		dfs_nol_update(dfs);
	}
}
125#endif /* #if 0 */
126
/*
 * dfs_task() - radar-processing timer handler.
 *
 * Fired via ath_dfs_task_timer (scheduled from the PHY-error path, or by
 * the DFS_BANGRADAR control hook).  Runs the queued radar events through
 * dfs_process_radarevent() and, if radar is confirmed on the current
 * channel, acts according to dfs_rinfo.rn_use_nol:
 *   1 - notify the umac (ic_dfs_notify_radar) to start CSA/channel change;
 *   0 - test mode: arm ath_dfstesttimer so we CSA back to the original
 *       channel ~1 ms later;
 *   2 - note the radar but take no action.
 * Always clears ath_radar_tasksched before returning so the task can be
 * rescheduled.
 */
static os_timer_func(dfs_task)
{
	struct ieee80211com *ic;
	struct ath_dfs *dfs = NULL;

	OS_GET_TIMER_ARG(ic, struct ieee80211com *);
	dfs = (struct ath_dfs *)ic->ic_dfs;
	/*
	 * XXX no locking?!
	 */
	if (dfs_process_radarevent(dfs, ic->ic_curchan)) {
#ifndef ATH_DFS_RADAR_DETECTION_ONLY

		/*
		 * This marks the channel (and the extension channel, if HT40) as
		 * having seen a radar event. It marks CHAN_INTERFERENCE and
		 * will add it to the local NOL implementation.
		 *
		 * This is only done for 'usenol=1', as the other two modes
		 * don't do radar notification or CAC/CSA/NOL; it just notes
		 * there was a radar.
		 */

		if (dfs->dfs_rinfo.rn_use_nol == 1) {
			/* dfs_channel_mark_radar(dfs, ic->ic_curchan); */
		}
#endif /* ATH_DFS_RADAR_DETECTION_ONLY */

		/*
		 * This calls into the umac DFS code, which sets the umac related
		 * radar flags and begins the channel change machinery.
		 *
		 * XXX TODO: the umac NOL code isn't used, but IEEE80211_CHAN_RADAR
		 * still gets set. Since the umac NOL code isn't used, that flag
		 * is never cleared. This needs to be fixed. See EV 105776.
		 */
		if (dfs->dfs_rinfo.rn_use_nol == 1) {
			ic->ic_dfs_notify_radar(ic, ic->ic_curchan);
		} else if (dfs->dfs_rinfo.rn_use_nol == 0) {
			/*
			 * For the test mode, don't do a CSA here; but setup the
			 * test timer so we get a CSA _back_ to the original channel.
			 */
			OS_CANCEL_TIMER(&dfs->ath_dfstesttimer);
			dfs->ath_dfstest = 1;
			/* chan_lock guards ic_curchan while we sample it. */
			qdf_spin_lock_bh(&ic->chan_lock);
			dfs->ath_dfstest_ieeechan = ic->ic_curchan->ic_ieee;
			qdf_spin_unlock_bh(&ic->chan_lock);
			dfs->ath_dfstesttime = 1;       /* 1ms */
			OS_SET_TIMER(&dfs->ath_dfstesttimer,
				     dfs->ath_dfstesttime);
		}
	}
	dfs->ath_radar_tasksched = 0;
}
182
/*
 * dfs_testtimer_task() - test-mode timer handler.
 *
 * Armed by dfs_task() when rn_use_nol == 0 (test mode): once it fires,
 * clear the test flag and CSA back to the channel recorded in
 * ath_dfstest_ieeechan.
 */
static os_timer_func(dfs_testtimer_task)
{
	struct ieee80211com *ic;
	struct ath_dfs *dfs = NULL;

	OS_GET_TIMER_ARG(ic, struct ieee80211com *);
	dfs = (struct ath_dfs *)ic->ic_dfs;

	/* XXX no locking? */
	dfs->ath_dfstest = 0;

	/*
	 * Flip the channel back to the original channel.
	 * Make sure this is done properly with a CSA.
	 */
	DFS_PRINTK("%s: go back to channel %d\n",
		   __func__, dfs->ath_dfstest_ieeechan);

	/*
	 * XXX The mere existence of this method indirection
	 * to a umac function means this code belongs in
	 * the driver, _not_ here. Please fix this!
	 */
	ic->ic_start_csa(ic, dfs->ath_dfstest_ieeechan);
}
208
209static int dfs_get_debug_info(struct ieee80211com *ic, int type, void *data)
210{
211 struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
212 if (data) {
213 *(uint32_t *) data = dfs->dfs_proc_phyerr;
214 }
215 return (int)dfs->dfs_proc_phyerr;
216}
217
218int dfs_attach(struct ieee80211com *ic)
219{
220 int i, n;
221 struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
222 struct ath_dfs_radar_tab_info radar_info;
223
224 if (dfs != NULL) {
225 /*DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
226 "%s: ic_dfs was not NULL\n",
227 __func__);
228 */
229 return 1;
230 }
231
232 dfs =
233 (struct ath_dfs *)os_malloc(NULL, sizeof(struct ath_dfs),
234 GFP_ATOMIC);
235
236 if (dfs == NULL) {
237 /*DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
238 "%s: ath_dfs allocation failed\n", __func__); */
239 return 1;
240 }
241
242 OS_MEMZERO(dfs, sizeof(struct ath_dfs));
243
244 ic->ic_dfs = (void *)dfs;
245
246 dfs->ic = ic;
247
248 ic->ic_dfs_debug = dfs_get_debug_info;
249#ifndef ATH_DFS_RADAR_DETECTION_ONLY
250 dfs->dfs_nol = NULL;
251#endif
252
253 /*
254 * Zero out radar_info. It's possible that the attach function won't
255 * fetch an initial regulatory configuration; you really do want to
256 * ensure that the contents indicates there aren't any filters.
257 */
258 OS_MEMZERO(&radar_info, sizeof(radar_info));
259 ic->ic_dfs_attach(ic, &dfs->dfs_caps, &radar_info);
260 dfs_clear_stats(ic);
261 dfs->dfs_event_log_on = 0;
262 OS_INIT_TIMER(NULL, &(dfs->ath_dfs_task_timer), dfs_task, (void *)(ic),
Anurag Chouhan6d760662016-02-20 16:05:43 +0530263 QDF_TIMER_TYPE_SW);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800264#ifndef ATH_DFS_RADAR_DETECTION_ONLY
265 OS_INIT_TIMER(NULL, &(dfs->ath_dfstesttimer), dfs_testtimer_task,
Anurag Chouhan6d760662016-02-20 16:05:43 +0530266 (void *)ic, QDF_TIMER_TYPE_SW);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800267 dfs->ath_dfs_cac_time = ATH_DFS_WAIT_MS;
268 dfs->ath_dfstesttime = ATH_DFS_TEST_RETURN_PERIOD_MS;
269#endif
270 ATH_DFSQ_LOCK_INIT(dfs);
271 STAILQ_INIT(&dfs->dfs_radarq);
272 ATH_ARQ_LOCK_INIT(dfs);
273 STAILQ_INIT(&dfs->dfs_arq);
274 STAILQ_INIT(&(dfs->dfs_eventq));
275 ATH_DFSEVENTQ_LOCK_INIT(dfs);
276 dfs->events = (struct dfs_event *)os_malloc(NULL,
277 sizeof(struct dfs_event) *
278 DFS_MAX_EVENTS, GFP_ATOMIC);
279 if (dfs->events == NULL) {
280 OS_FREE(dfs);
281 ic->ic_dfs = NULL;
282 DFS_PRINTK("%s: events allocation failed\n", __func__);
283 return 1;
284 }
285 for (i = 0; i < DFS_MAX_EVENTS; i++) {
286 STAILQ_INSERT_TAIL(&(dfs->dfs_eventq), &dfs->events[i],
287 re_list);
288 }
289
290 dfs->pulses =
291 (struct dfs_pulseline *)os_malloc(NULL,
292 sizeof(struct dfs_pulseline),
293 GFP_ATOMIC);
294 if (dfs->pulses == NULL) {
295 OS_FREE(dfs->events);
296 dfs->events = NULL;
297 OS_FREE(dfs);
298 ic->ic_dfs = NULL;
299 DFS_PRINTK("%s: pulse buffer allocation failed\n", __func__);
300 return 1;
301 }
302
Rakesh Sunkif7f82e52015-12-14 15:09:40 -0800303 /*
304 * If the chip supports DFS-3 then allocate
305 * memory for pulses for extension segment.
306 */
307 if (ic->dfs_hw_bd_id != DFS_HWBD_QCA6174) {
308 dfs->pulses_ext_seg = (struct dfs_pulseline *)
309 os_malloc(NULL,
310 sizeof(struct dfs_pulseline),
311 GFP_ATOMIC);
312 if (dfs->pulses_ext_seg == NULL) {
313 OS_FREE(dfs->events);
314 dfs->events = NULL;
315 OS_FREE(dfs);
316 ic->ic_dfs = NULL;
Anurag Chouhan6d760662016-02-20 16:05:43 +0530317 CDF_TRACE(QDF_MODULE_ID_SAP, CDF_TRACE_LEVEL_ERROR,
Rakesh Sunkif7f82e52015-12-14 15:09:40 -0800318 "%s[%d]: pulse buffer allocation failed",
319 __func__, __LINE__);
320 return 1;
321 }
322 dfs->pulses_ext_seg->pl_lastelem = DFS_MAX_PULSE_BUFFER_MASK;
323 }
324
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800325 dfs->pulses->pl_lastelem = DFS_MAX_PULSE_BUFFER_MASK;
326
327 /* Allocate memory for radar filters */
328 for (n = 0; n < DFS_MAX_RADAR_TYPES; n++) {
329 dfs->dfs_radarf[n] =
330 (struct dfs_filtertype *)os_malloc(NULL,
331 sizeof(struct
332 dfs_filtertype),
333 GFP_ATOMIC);
334 if (dfs->dfs_radarf[n] == NULL) {
335 DFS_PRINTK
336 ("%s: cannot allocate memory for radar filter types\n",
337 __func__);
338 goto bad1;
339 }
340 OS_MEMZERO(dfs->dfs_radarf[n], sizeof(struct dfs_filtertype));
341 }
342 /* Allocate memory for radar table */
343 dfs->dfs_radartable =
Anurag Chouhan6d760662016-02-20 16:05:43 +0530344 (int8_t **) os_malloc(NULL, 256 * sizeof(int8_t *), GFP_ATOMIC);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800345 if (dfs->dfs_radartable == NULL) {
346 DFS_PRINTK("%s: cannot allocate memory for radar table\n",
347 __func__);
348 goto bad1;
349 }
350 for (n = 0; n < 256; n++) {
351 dfs->dfs_radartable[n] =
352 os_malloc(NULL, DFS_MAX_RADAR_OVERLAP * sizeof(int8_t),
353 GFP_ATOMIC);
354 if (dfs->dfs_radartable[n] == NULL) {
355 DFS_PRINTK
356 ("%s: cannot allocate memory for radar table entry\n",
357 __func__);
358 goto bad2;
359 }
360 }
361
362 if (usenol == 0)
363 DFS_PRINTK("%s: NOL disabled\n", __func__);
364 else if (usenol == 2)
365 DFS_PRINTK("%s: NOL disabled; no CSA\n", __func__);
366
367 dfs->dfs_rinfo.rn_use_nol = usenol;
368
369 /* Init the cached extension channel busy for false alarm reduction */
370 dfs->dfs_rinfo.ext_chan_busy_ts = ic->ic_get_TSF64(ic);
371 dfs->dfs_rinfo.dfs_ext_chan_busy = 0;
372 /* Init the Bin5 chirping related data */
373 dfs->dfs_rinfo.dfs_bin5_chirp_ts = dfs->dfs_rinfo.ext_chan_busy_ts;
374 dfs->dfs_rinfo.dfs_last_bin5_dur = MAX_BIN5_DUR;
375 dfs->dfs_b5radars = NULL;
Rakesh Sunkif7f82e52015-12-14 15:09:40 -0800376 if (ic->dfs_hw_bd_id != DFS_HWBD_QCA6174) {
377 dfs->dfs_rinfo.dfs_last_bin5_dur_ext_seg = MAX_BIN5_DUR;
378 dfs->dfs_b5radars_ext_seg = NULL;
379 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800380
381 /*
382 * If dfs_init_radar_filters() fails, we can abort here and
383 * reconfigure when the first valid channel + radar config
384 * is available.
385 */
386 if (dfs_init_radar_filters(ic, &radar_info)) {
387 DFS_PRINTK(" %s: Radar Filter Intialization Failed \n",
388 __func__);
389 return 1;
390 }
391
392 dfs->ath_dfs_false_rssi_thres = RSSI_POSSIBLY_FALSE;
393 dfs->ath_dfs_peak_mag = SEARCH_FFT_REPORT_PEAK_MAG_THRSH;
394 dfs->dfs_phyerr_freq_min = 0x7fffffff;
395 dfs->dfs_phyerr_freq_max = 0;
396 dfs->dfs_phyerr_queued_count = 0;
397 dfs->dfs_phyerr_w53_counter = 0;
398 dfs->dfs_pri_multiplier = 2;
399
400 dfs->ath_dfs_nol_timeout = DFS_NOL_TIMEOUT_S;
401 return 0;
402
403bad2:
404 OS_FREE(dfs->dfs_radartable);
405 dfs->dfs_radartable = NULL;
406bad1:
407 for (n = 0; n < DFS_MAX_RADAR_TYPES; n++) {
408 if (dfs->dfs_radarf[n] != NULL) {
409 OS_FREE(dfs->dfs_radarf[n]);
410 dfs->dfs_radarf[n] = NULL;
411 }
412 }
413 if (dfs->pulses) {
414 OS_FREE(dfs->pulses);
415 dfs->pulses = NULL;
416 }
Rakesh Sunkif7f82e52015-12-14 15:09:40 -0800417 if (dfs->pulses_ext_seg &&
418 ic->dfs_hw_bd_id != DFS_HWBD_QCA6174) {
419 OS_FREE(dfs->pulses_ext_seg);
420 dfs->pulses_ext_seg = NULL;
421 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800422 if (dfs->events) {
423 OS_FREE(dfs->events);
424 dfs->events = NULL;
425 }
426
427 if (ic->ic_dfs) {
428 OS_FREE(ic->ic_dfs);
429 ic->ic_dfs = NULL;
430 }
431 return 1;
432#undef N
433}
434
/*
 * dfs_detach() - tear down all DFS state attached to @ic.
 *
 * Cancels the radar and test timers if pending, drains the radar queue
 * and delay lines, then releases every buffer allocated by dfs_attach()
 * (pulse lines, radar filter types, radar table rows + array, bin5 radar
 * state, event pool), cleans up NOL timers, frees the ath_dfs object and
 * clears ic->ic_dfs.  Safe to call when DFS was never attached.
 */
void dfs_detach(struct ieee80211com *ic)
{
	struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
	int n, empty;

	if (dfs == NULL) {
		DFS_DPRINTK(dfs, ATH_DEBUG_DFS1, "%s: ic_dfs is NULL\n",
			    __func__);
		return;
	}

	/* Bug 29099 make sure all outstanding timers are cancelled */

	if (dfs->ath_radar_tasksched) {
		OS_CANCEL_TIMER(&dfs->ath_dfs_task_timer);
		dfs->ath_radar_tasksched = 0;
	}

	if (dfs->ath_dfstest) {
		OS_CANCEL_TIMER(&dfs->ath_dfstesttimer);
		dfs->ath_dfstest = 0;
	}
#if 0
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
	if (dfs->ic_dfswait) {
		OS_CANCEL_TIMER(&dfs->ic_dfswaittimer);
		dfs->ath_dfswait = 0;
	}

	OS_CANCEL_TIMER(&dfs->sc_dfs_war_timer);
	if (dfs->dfs_nol != NULL) {
		struct dfs_nolelem *nol, *next;
		nol = dfs->dfs_nol;
		/* Bug 29099 - each NOL element has its own timer, cancel it and
		   free the element */
		while (nol != NULL) {
			OS_CANCEL_TIMER(&nol->nol_timer);
			next = nol->nol_next;
			OS_FREE(nol);
			nol = next;
		}
		dfs->dfs_nol = NULL;
	}
#endif
#endif

	/* Return radar events to free q */
	dfs_reset_radarq(dfs);
	dfs_reset_alldelaylines(dfs, DFS_80P80_SEG0);
	/* Extension-segment state exists only on non-QCA6174 (DFS-3) HW. */
	if (ic->dfs_hw_bd_id != DFS_HWBD_QCA6174)
		dfs_reset_alldelaylines(dfs, DFS_80P80_SEG1);

	/* Free up pulse log */
	if (dfs->pulses != NULL) {
		OS_FREE(dfs->pulses);
		dfs->pulses = NULL;
	}

	if (dfs->pulses_ext_seg != NULL &&
	    ic->dfs_hw_bd_id != DFS_HWBD_QCA6174) {
		OS_FREE(dfs->pulses_ext_seg);
		dfs->pulses_ext_seg = NULL;
	}

	for (n = 0; n < DFS_MAX_RADAR_TYPES; n++) {
		if (dfs->dfs_radarf[n] != NULL) {
			OS_FREE(dfs->dfs_radarf[n]);
			dfs->dfs_radarf[n] = NULL;
		}
	}

	if (dfs->dfs_radartable != NULL) {
		for (n = 0; n < 256; n++) {
			if (dfs->dfs_radartable[n] != NULL) {
				OS_FREE(dfs->dfs_radartable[n]);
				dfs->dfs_radartable[n] = NULL;
			}
		}
		OS_FREE(dfs->dfs_radartable);
		dfs->dfs_radartable = NULL;
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
		dfs->ath_dfs_isdfsregdomain = 0;
#endif
	}

	if (dfs->dfs_b5radars != NULL) {
		OS_FREE(dfs->dfs_b5radars);
		dfs->dfs_b5radars = NULL;
	}
	if (dfs->dfs_b5radars_ext_seg != NULL &&
	    ic->dfs_hw_bd_id != DFS_HWBD_QCA6174) {
		OS_FREE(dfs->dfs_b5radars_ext_seg);
		dfs->dfs_b5radars_ext_seg = NULL;
	}

/* Commenting out since all the ar functions are obsolete and
 * the function definition has been removed as part of dfs_ar.c
 * dfs_reset_ar(dfs);
 */
	ATH_ARQ_LOCK(dfs);
	empty = STAILQ_EMPTY(&(dfs->dfs_arq));
	ATH_ARQ_UNLOCK(dfs);
	if (!empty) {
/*
 * Commenting out since all the ar functions are obsolete and
 * the function definition has been removed as part of dfs_ar.c
 *
 * dfs_reset_arq(dfs);
 */
	}
	if (dfs->events != NULL) {
		OS_FREE(dfs->events);
		dfs->events = NULL;
	}
	dfs_nol_timer_cleanup(dfs);
	OS_FREE(dfs);

	/* XXX? */
	ic->ic_dfs = NULL;
}
555
/*
 * This is called on a channel change (and from the reset path) to
 * disable radar/AR PHY-error processing until dfs_radar_enable()
 * re-arms it for the new channel.
 */
560int dfs_radar_disable(struct ieee80211com *ic)
561{
562 struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
563#ifdef ATH_ENABLE_AR
564 dfs->dfs_proc_phyerr &= ~DFS_AR_EN;
565#endif
566 dfs->dfs_proc_phyerr &= ~DFS_RADAR_EN;
567 return 0;
568}
569
570/*
571 * This is called each time a channel change occurs, to (potentially) enable
572 * the radar code.
573 */
/*
 * dfs_radar_enable() - (re)arm radar detection for the current channel.
 *
 * Called on each channel change.  Disables detection in the driver,
 * re-initializes the radar filters for the (possibly new) DFS domain,
 * then — for AP/IBSS on a DFS channel — resets the delay lines, looks up
 * the per-channel radar state (primary, and extension when HT40) and
 * pushes the channel's PHY-error thresholds down via ic_dfs_enable().
 * Also derives dur_multiplier from the reported fast-clock mode.
 *
 * Return: DFS_STATUS_SUCCESS, or -EIO when DFS is not attached or the
 * radar filter initialization fails.
 */
int dfs_radar_enable(struct ieee80211com *ic,
		     struct ath_dfs_radar_tab_info *radar_info)
{
	int is_ext_ch;
	int is_fastclk = 0;
	int radar_filters_init_status = 0;
	/* uint32_t rfilt; */
	struct ath_dfs *dfs;
	struct dfs_state *rs_pri, *rs_ext;
	struct dfs_ieee80211_channel *chan = ic->ic_curchan, *ext_ch = NULL;
	is_ext_ch = IEEE80211_IS_CHAN_11N_HT40(ic->ic_curchan);
	dfs = (struct ath_dfs *)ic->ic_dfs;
	rs_pri = NULL;
	rs_ext = NULL;
#if 0
	int i;
#endif
	if (dfs == NULL) {
		DFS_DPRINTK(dfs, ATH_DEBUG_DFS, "%s: ic_dfs is NULL\n",
			    __func__);

		return -EIO;
	}
	ic->ic_dfs_disable(ic);

	/*
	 * Setting country code might change the DFS domain
	 * so initialize the DFS Radar filters
	 */
	radar_filters_init_status = dfs_init_radar_filters(ic, radar_info);

	/*
	 * dfs_init_radar_filters() returns 1 on failure and
	 * 0 on success.
	 */
	if (DFS_STATUS_FAIL == radar_filters_init_status) {
		CDF_TRACE(QDF_MODULE_ID_SAP, CDF_TRACE_LEVEL_ERROR,
			  "%s[%d]: DFS Radar Filters Initialization Failed",
			  __func__, __LINE__);
		return -EIO;
	}

	if ((ic->ic_opmode == IEEE80211_M_HOSTAP
	     || ic->ic_opmode == IEEE80211_M_IBSS)) {

		if (IEEE80211_IS_CHAN_DFS(chan)) {

			uint8_t index_pri, index_ext;
#ifdef ATH_ENABLE_AR
			dfs->dfs_proc_phyerr |= DFS_AR_EN;
#endif
			dfs->dfs_proc_phyerr |= DFS_RADAR_EN;

			if (is_ext_ch) {
				ext_ch = ieee80211_get_extchan(ic);
			}
			dfs_reset_alldelaylines(dfs, DFS_80P80_SEG0);
			/*
			 * Extension segment delaylines will be
			 * enabled only when SAP operates in 80p80
			 * and both the channels are DFS.
			 */
			if (chan->ic_80p80_both_dfs)
				dfs_reset_alldelaylines(dfs, DFS_80P80_SEG1);

			rs_pri = dfs_getchanstate(dfs, &index_pri, 0);
			if (ext_ch) {
				rs_ext = dfs_getchanstate(dfs, &index_ext, 1);
			}
			if (rs_pri != NULL
			    && ((ext_ch == NULL) || (rs_ext != NULL))) {
				struct ath_dfs_phyerr_param pe;

				OS_MEMSET(&pe, '\0', sizeof(pe));

				/* Channel changed: start from clean lines. */
				if (index_pri != dfs->dfs_curchan_radindex) {
					dfs_reset_alldelaylines(dfs,
								DFS_80P80_SEG0);
					/*
					 * Reset only when ext segment is
					 * present
					 */
					if (chan->ic_80p80_both_dfs)
						dfs_reset_alldelaylines(dfs,
							DFS_80P80_SEG1);
				}
				dfs->dfs_curchan_radindex = (int16_t) index_pri;
				dfs->dfs_pri_multiplier_ini =
					radar_info->dfs_pri_multiplier;

				if (rs_ext)
					dfs->dfs_extchan_radindex =
						(int16_t) index_ext;

				ath_dfs_phyerr_param_copy(&pe,
							  &rs_pri->rs_param);
				DFS_DPRINTK(dfs, ATH_DEBUG_DFS3,
					    "%s: firpwr=%d, rssi=%d, height=%d, "
					    "prssi=%d, inband=%d, relpwr=%d, "
					    "relstep=%d, maxlen=%d\n",
					    __func__,
					    pe.pe_firpwr,
					    pe.pe_rrssi,
					    pe.pe_height,
					    pe.pe_prssi,
					    pe.pe_inband,
					    pe.pe_relpwr,
					    pe.pe_relstep, pe.pe_maxlen);

				ic->ic_dfs_enable(ic, &is_fastclk, &pe);
				DFS_DPRINTK(dfs, ATH_DEBUG_DFS,
					    "Enabled radar detection on channel %d\n",
					    chan->ic_freq);
				dfs->dur_multiplier =
					is_fastclk ? DFS_FAST_CLOCK_MULTIPLIER :
					DFS_NO_FAST_CLOCK_MULTIPLIER;
				DFS_DPRINTK(dfs, ATH_DEBUG_DFS3,
					    "%s: duration multiplier is %d\n",
					    __func__, dfs->dur_multiplier);
			} else
				DFS_DPRINTK(dfs, ATH_DEBUG_DFS,
					    "%s: No more radar states left\n",
					    __func__);
		}
	}

	return DFS_STATUS_SUCCESS;
}
702
/*
 * dfs_control() - ioctl-style dispatch for DFS tuning, test and NOL ops.
 *
 * @id selects the operation; @indata/@insize carry the caller-supplied
 * argument (validated for size before use) and @outdata/@outsize return
 * results.  Returns 0 on success, -EINVAL for an unknown id, an
 * undersized/missing buffer, or when DFS is not attached.
 */
int
dfs_control(struct ieee80211com *ic, u_int id,
	    void *indata, uint32_t insize, void *outdata, uint32_t *outsize)
{
	int error = 0;
	struct ath_dfs_phyerr_param peout;
	struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
	struct dfs_ioctl_params *dfsparams;
	uint32_t val = 0;
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
	struct dfsreq_nolinfo *nol;
	uint32_t *data = NULL;
#endif /* ATH_DFS_RADAR_DETECTION_ONLY */
	int i;

	if (dfs == NULL) {
		error = -EINVAL;
		DFS_DPRINTK(dfs, ATH_DEBUG_DFS1, "%s DFS is null\n", __func__);
		goto bad;
	}

	switch (id) {
	case DFS_SET_THRESH:
		/* Set all eight PHY-error thresholds in one shot. */
		if (insize < sizeof(struct dfs_ioctl_params) || !indata) {
			DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
				    "%s: insize=%d, expected=%zu bytes, indata=%p\n",
				    __func__, insize,
				    sizeof(struct dfs_ioctl_params), indata);
			error = -EINVAL;
			break;
		}
		dfsparams = (struct dfs_ioctl_params *)indata;
		if (!dfs_set_thresholds
			    (ic, DFS_PARAM_FIRPWR, dfsparams->dfs_firpwr))
			error = -EINVAL;
		if (!dfs_set_thresholds
			    (ic, DFS_PARAM_RRSSI, dfsparams->dfs_rrssi))
			error = -EINVAL;
		if (!dfs_set_thresholds
			    (ic, DFS_PARAM_HEIGHT, dfsparams->dfs_height))
			error = -EINVAL;
		if (!dfs_set_thresholds
			    (ic, DFS_PARAM_PRSSI, dfsparams->dfs_prssi))
			error = -EINVAL;
		if (!dfs_set_thresholds
			    (ic, DFS_PARAM_INBAND, dfsparams->dfs_inband))
			error = -EINVAL;
		/* 5413 specific */
		if (!dfs_set_thresholds
			    (ic, DFS_PARAM_RELPWR, dfsparams->dfs_relpwr))
			error = -EINVAL;
		if (!dfs_set_thresholds
			    (ic, DFS_PARAM_RELSTEP, dfsparams->dfs_relstep))
			error = -EINVAL;
		if (!dfs_set_thresholds
			    (ic, DFS_PARAM_MAXLEN, dfsparams->dfs_maxlen))
			error = -EINVAL;
		break;
	case DFS_GET_THRESH:
		/* Report the current thresholds in ioctl format. */
		if (!outdata || !outsize
		    || *outsize < sizeof(struct dfs_ioctl_params)) {
			error = -EINVAL;
			break;
		}
		*outsize = sizeof(struct dfs_ioctl_params);
		dfsparams = (struct dfs_ioctl_params *)outdata;

		/*
		 * Fetch the DFS thresholds using the internal representation.
		 */
		(void)dfs_get_thresholds(ic, &peout);

		/*
		 * Convert them to the dfs IOCTL representation.
		 */
		ath_dfs_dfsparam_to_ioctlparam(&peout, dfsparams);
		break;
	case DFS_RADARDETECTS:
		if (!outdata || !outsize || *outsize < sizeof(uint32_t)) {
			error = -EINVAL;
			break;
		}
		*outsize = sizeof(uint32_t);
		*((uint32_t *) outdata) = dfs->ath_dfs_stats.num_radar_detects;
		break;
	case DFS_DISABLE_DETECT:
		dfs->dfs_proc_phyerr &= ~DFS_RADAR_EN;
		dfs->ic->ic_dfs_state.ignore_dfs = 1;
		DFS_PRINTK("%s enable detects, ignore_dfs %d\n",
			   __func__, dfs->ic->ic_dfs_state.ignore_dfs);
		break;
	case DFS_ENABLE_DETECT:
		dfs->dfs_proc_phyerr |= DFS_RADAR_EN;
		dfs->ic->ic_dfs_state.ignore_dfs = 0;
		DFS_PRINTK("%s enable detects, ignore_dfs %d\n",
			   __func__, dfs->ic->ic_dfs_state.ignore_dfs);
		break;
	case DFS_DISABLE_FFT:
		/* UMACDFS: TODO: val = ath_hal_dfs_config_fft(sc->sc_ah, false); */
		DFS_PRINTK("%s TODO disable FFT val=0x%x \n", __func__, val);
		break;
	case DFS_ENABLE_FFT:
		/* UMACDFS TODO: val = ath_hal_dfs_config_fft(sc->sc_ah, true); */
		DFS_PRINTK("%s TODO enable FFT val=0x%x \n", __func__, val);
		break;
	case DFS_SET_DEBUG_LEVEL:
		if (insize < sizeof(uint32_t) || !indata) {
			error = -EINVAL;
			break;
		}
		dfs->dfs_debug_mask = *(uint32_t *) indata;
		DFS_PRINTK("%s debug level now = 0x%x \n",
			   __func__, dfs->dfs_debug_mask);
		if (dfs->dfs_debug_mask & ATH_DEBUG_DFS3) {
			/* Enable debug Radar Event */
			dfs->dfs_event_log_on = 1;
		} else {
			dfs->dfs_event_log_on = 0;
		}
		break;
	case DFS_SET_FALSE_RSSI_THRES:
		if (insize < sizeof(uint32_t) || !indata) {
			error = -EINVAL;
			break;
		}
		dfs->ath_dfs_false_rssi_thres = *(uint32_t *) indata;
		DFS_PRINTK("%s false RSSI threshold now = 0x%x \n",
			   __func__, dfs->ath_dfs_false_rssi_thres);
		break;
	case DFS_SET_PEAK_MAG:
		if (insize < sizeof(uint32_t) || !indata) {
			error = -EINVAL;
			break;
		}
		dfs->ath_dfs_peak_mag = *(uint32_t *) indata;
		DFS_PRINTK("%s peak_mag now = 0x%x \n",
			   __func__, dfs->ath_dfs_peak_mag);
		break;
	case DFS_IGNORE_CAC:
		if (insize < sizeof(uint32_t) || !indata) {
			error = -EINVAL;
			break;
		}
		if (*(uint32_t *) indata) {
			dfs->ic->ic_dfs_state.ignore_cac = 1;
		} else {
			dfs->ic->ic_dfs_state.ignore_cac = 0;
		}
		DFS_PRINTK("%s ignore cac = 0x%x \n",
			   __func__, dfs->ic->ic_dfs_state.ignore_cac);
		break;
	case DFS_SET_NOL_TIMEOUT:
		/* 0 restores the default NOL timeout. */
		if (insize < sizeof(uint32_t) || !indata) {
			error = -EINVAL;
			break;
		}
		if (*(int *)indata) {
			dfs->ath_dfs_nol_timeout = *(int *)indata;
		} else {
			dfs->ath_dfs_nol_timeout = DFS_NOL_TIMEOUT_S;
		}
		DFS_PRINTK("%s nol timeout = %d sec \n",
			   __func__, dfs->ath_dfs_nol_timeout);
		break;
#ifndef ATH_DFS_RADAR_DETECTION_ONLY
	case DFS_MUTE_TIME:
		if (insize < sizeof(uint32_t) || !indata) {
			error = -EINVAL;
			break;
		}
		data = (uint32_t *) indata;
		dfs->ath_dfstesttime = *data;
		dfs->ath_dfstesttime *= (1000); /* convert sec into ms */
		break;
	case DFS_GET_USENOL:
		/* Also resets the PHY-error/event-log counters as a side
		 * effect (legacy behavior). */
		if (!outdata || !outsize || *outsize < sizeof(uint32_t)) {
			error = -EINVAL;
			break;
		}
		*outsize = sizeof(uint32_t);
		*((uint32_t *) outdata) = dfs->dfs_rinfo.rn_use_nol;

		for (i = 0;
		     (i < DFS_EVENT_LOG_SIZE) && (i < dfs->dfs_event_log_count);
		     i++) {
			/* DFS_DPRINTK(sc, ATH_DEBUG_DFS,"ts=%llu diff_ts=%u rssi=%u dur=%u\n", dfs->radar_log[i].ts, dfs->radar_log[i].diff_ts, dfs->radar_log[i].rssi, dfs->radar_log[i].dur); */

		}
		dfs->dfs_event_log_count = 0;
		dfs->dfs_phyerr_count = 0;
		dfs->dfs_phyerr_reject_count = 0;
		dfs->dfs_phyerr_queued_count = 0;
		dfs->dfs_phyerr_freq_min = 0x7fffffff;
		dfs->dfs_phyerr_freq_max = 0;
		break;
	case DFS_SET_USENOL:
		if (insize < sizeof(uint32_t) || !indata) {
			error = -EINVAL;
			break;
		}
		dfs->dfs_rinfo.rn_use_nol = *(uint32_t *) indata;
		/* iwpriv markdfs in linux can do the same thing... */
		break;
	case DFS_GET_NOL:
		if (!outdata || !outsize
		    || *outsize < sizeof(struct dfsreq_nolinfo)) {
			error = -EINVAL;
			break;
		}
		*outsize = sizeof(struct dfsreq_nolinfo);
		nol = (struct dfsreq_nolinfo *)outdata;
		dfs_get_nol(dfs, (struct dfsreq_nolelem *)nol->dfs_nol,
			    &nol->ic_nchans);
		dfs_print_nol(dfs);
		break;
	case DFS_SET_NOL:
		if (insize < sizeof(struct dfsreq_nolinfo) || !indata) {
			error = -EINVAL;
			break;
		}
		nol = (struct dfsreq_nolinfo *)indata;
		dfs_set_nol(dfs, (struct dfsreq_nolelem *)nol->dfs_nol,
			    nol->ic_nchans);
		break;

	case DFS_SHOW_NOL:
		dfs_print_nol(dfs);
		break;
	case DFS_BANGRADAR:
		/* Test hook: fake a radar detect by scheduling dfs_task. */
#if 0                           /* MERGE_TBD */
		if (sc->sc_nostabeacons) {
			printk("No radar detection Enabled \n");
			break;
		}
#endif
		dfs->dfs_bangradar = 1;
		dfs->ath_radar_tasksched = 1;
		OS_SET_TIMER(&dfs->ath_dfs_task_timer, 0);
		break;
#endif /* ATH_DFS_RADAR_DETECTION_ONLY */
	default:
		error = -EINVAL;
	}
bad:
	return error;
}
949
/*
 * dfs_set_thresholds() - set one PHY-error detection threshold.
 *
 * Updates the cached per-channel radar state for the current channel
 * index and pushes only the changed parameter (others left at "no
 * value" via ath_dfs_phyerr_init_noval) to the driver through
 * ic_dfs_enable().
 *
 * Return: 1 on success, 0 when DFS is not attached or the current
 * channel radar index is out of range.
 */
int
dfs_set_thresholds(struct ieee80211com *ic, const uint32_t threshtype,
		   const uint32_t value)
{
	struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
	int16_t chanindex;
	struct dfs_state *rs;
	struct ath_dfs_phyerr_param pe;
	int is_fastclk = 0;     /* XXX throw-away */

	if (dfs == NULL) {
		DFS_DPRINTK(dfs, ATH_DEBUG_DFS1, "%s: ic_dfs is NULL\n",
			    __func__);
		return 0;
	}

	chanindex = dfs->dfs_curchan_radindex;
	if ((chanindex < 0) || (chanindex >= DFS_NUM_RADAR_STATES)) {
		DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
			    "%s: chanindex = %d, DFS_NUM_RADAR_STATES=%d\n",
			    __func__, chanindex, DFS_NUM_RADAR_STATES);
		return 0;
	}

	DFS_DPRINTK(dfs, ATH_DEBUG_DFS,
		    "%s: threshtype=%d, value=%d\n", __func__, threshtype,
		    value);

	/* Start from "no value" so only the selected field is sent down. */
	ath_dfs_phyerr_init_noval(&pe);

	rs = &(dfs->dfs_radar[chanindex]);
	switch (threshtype) {
	case DFS_PARAM_FIRPWR:
		rs->rs_param.pe_firpwr = (int32_t) value;
		pe.pe_firpwr = value;
		break;
	case DFS_PARAM_RRSSI:
		rs->rs_param.pe_rrssi = value;
		pe.pe_rrssi = value;
		break;
	case DFS_PARAM_HEIGHT:
		rs->rs_param.pe_height = value;
		pe.pe_height = value;
		break;
	case DFS_PARAM_PRSSI:
		rs->rs_param.pe_prssi = value;
		pe.pe_prssi = value;
		break;
	case DFS_PARAM_INBAND:
		rs->rs_param.pe_inband = value;
		pe.pe_inband = value;
		break;
	/* 5413 specific */
	case DFS_PARAM_RELPWR:
		rs->rs_param.pe_relpwr = value;
		pe.pe_relpwr = value;
		break;
	case DFS_PARAM_RELSTEP:
		rs->rs_param.pe_relstep = value;
		pe.pe_relstep = value;
		break;
	case DFS_PARAM_MAXLEN:
		rs->rs_param.pe_maxlen = value;
		pe.pe_maxlen = value;
		break;
	default:
		DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
			    "%s: unknown threshtype (%d)\n",
			    __func__, threshtype);
		break;
	}

	/*
	 * The driver layer dfs_enable routine is tasked with translating
	 * values from the global format to the per-device (HAL, offload)
	 * format.
	 */
	ic->ic_dfs_enable(ic, &is_fastclk, &pe);
	return 1;
}
1030
/*
 * dfs_get_thresholds() - fetch the current PHY-error thresholds.
 *
 * Zeroes @param and fills it via the driver's ic_dfs_get_thresholds
 * callback.  Always returns 1 (success).
 */
int
dfs_get_thresholds(struct ieee80211com *ic, struct ath_dfs_phyerr_param *param)
{
	/* UMACDFS : TODO:ath_hal_getdfsthresh(sc->sc_ah, param); */

	OS_MEMZERO(param, sizeof(*param));

	(void)ic->ic_dfs_get_thresholds(ic, param);

	return 1;
}
1042
1043uint16_t dfs_usenol(struct ieee80211com *ic)
1044{
1045 struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
1046 return dfs ? (uint16_t) dfs->dfs_rinfo.rn_use_nol : 0;
1047}
1048
1049uint16_t dfs_isdfsregdomain(struct ieee80211com *ic)
1050{
1051 struct ath_dfs *dfs = (struct ath_dfs *)ic->ic_dfs;
1052 return dfs ? dfs->dfsdomain : 0;
1053}
1054
#endif /* ATH_SUPPORT_DFS */