/*
 * mac80211 - channel management
 */

#include <linux/nl80211.h>
#include <linux/export.h>
#include <linux/rtnetlink.h>
#include <net/cfg80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"

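/*
 * Map a station's RX bandwidth to the nl80211 channel width that the
 * channel context must at least provide for it.
 */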
static enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta)
{
	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			return NL80211_CHAN_WIDTH_20;
		else
			return NL80211_CHAN_WIDTH_20_NOHT;
	case IEEE80211_STA_RX_BW_40:
		return NL80211_CHAN_WIDTH_40;
	case IEEE80211_STA_RX_BW_80:
		return NL80211_CHAN_WIDTH_80;
	case IEEE80211_STA_RX_BW_160:
		/*
		 * This applies to both 160 and 80+80. Since we use
		 * the returned value to consider degradation of
		 * ctx->conf.min_def, we have to make sure to take
		 * the bigger one (NL80211_CHAN_WIDTH_160).
		 * Otherwise we might try degrading even when not
		 * needed, as the max required sta_bw returned (80+80)
		 * might be smaller than the configured bw (160).
		 */
		return NL80211_CHAN_WIDTH_160;
	default:
		WARN_ON(1);
		return NL80211_CHAN_WIDTH_20;
	}
}

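/*
 * Return the widest bandwidth required by any uploaded station that
 * belongs to this interface (or to the same AP BSS, for AP/VLAN).
 */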
static enum nl80211_chan_width
ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata)
{
	enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;
	struct sta_info *sta;

	rcu_read_lock();
	list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
		if (sdata != sta->sdata &&
		    !(sta->sdata->bss && sta->sdata->bss == sdata->bss))
			continue;

		if (!sta->uploaded)
			continue;

		max_bw = max(max_bw, ieee80211_get_sta_bw(&sta->sta));
	}
	rcu_read_unlock();

	return max_bw;
}

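/*
 * Return the widest bandwidth required by any interface currently
 * bound to this channel context, including the monitor interface.
 */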
static enum nl80211_chan_width
ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
				      struct ieee80211_chanctx_conf *conf)
{
	struct ieee80211_sub_if_data *sdata;
	enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;

	rcu_read_lock();
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		struct ieee80211_vif *vif = &sdata->vif;
		enum nl80211_chan_width width = NL80211_CHAN_WIDTH_20_NOHT;

		if (!ieee80211_sdata_running(sdata))
			continue;

		if (rcu_access_pointer(sdata->vif.chanctx_conf) != conf)
			continue;

		switch (vif->type) {
		case NL80211_IFTYPE_AP:
		case NL80211_IFTYPE_AP_VLAN:
			width = ieee80211_get_max_required_bw(sdata);
			break;
		case NL80211_IFTYPE_P2P_DEVICE:
			continue;
		case NL80211_IFTYPE_STATION:
		case NL80211_IFTYPE_ADHOC:
		case NL80211_IFTYPE_WDS:
		case NL80211_IFTYPE_MESH_POINT:
			width = vif->bss_conf.chandef.width;
			break;
		case NL80211_IFTYPE_UNSPECIFIED:
		case NUM_NL80211_IFTYPES:
		case NL80211_IFTYPE_MONITOR:
		case NL80211_IFTYPE_P2P_CLIENT:
		case NL80211_IFTYPE_P2P_GO:
			WARN_ON_ONCE(1);
		}
		max_bw = max(max_bw, width);
	}

	/* use the configured bandwidth in case of monitor interface */
	sdata = rcu_dereference(local->monitor_sdata);
	if (sdata && rcu_access_pointer(sdata->vif.chanctx_conf) == conf)
		max_bw = max(max_bw, conf->def.width);

	rcu_read_unlock();

	return max_bw;
}

/*
 * recalc the min required chan width of the channel context, which is
 * the max of min required widths of all the interfaces bound to this
 * channel context.
 */
void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
				      struct ieee80211_chanctx *ctx)
{
	enum nl80211_chan_width max_bw;
	struct cfg80211_chan_def min_def;

	lockdep_assert_held(&local->chanctx_mtx);

	/* don't optimize 5MHz, 10MHz, and radar_enabled confs */
	if (ctx->conf.def.width == NL80211_CHAN_WIDTH_5 ||
	    ctx->conf.def.width == NL80211_CHAN_WIDTH_10 ||
	    ctx->conf.radar_enabled) {
		ctx->conf.min_def = ctx->conf.def;
		return;
	}

	max_bw = ieee80211_get_chanctx_max_required_bw(local, &ctx->conf);

	/* downgrade chandef up to max_bw */
	min_def = ctx->conf.def;
	while (min_def.width > max_bw)
		ieee80211_chandef_downgrade(&min_def);

	if (cfg80211_chandef_identical(&ctx->conf.min_def, &min_def))
		return;

	ctx->conf.min_def = min_def;
	if (!ctx->driver_present)
		return;

	drv_change_chanctx(local, ctx, IEEE80211_CHANCTX_CHANGE_MIN_WIDTH);
}

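/*
 * Switch an existing channel context to a (compatible) new chandef and
 * notify the driver; without hardware chanctx support this also updates
 * the single operating channel.
 */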
static void ieee80211_change_chanctx(struct ieee80211_local *local,
				     struct ieee80211_chanctx *ctx,
				     const struct cfg80211_chan_def *chandef)
{
	if (cfg80211_chandef_identical(&ctx->conf.def, chandef))
		return;

	WARN_ON(!cfg80211_chandef_compatible(&ctx->conf.def, chandef));

	ctx->conf.def = *chandef;
	drv_change_chanctx(local, ctx, IEEE80211_CHANCTX_CHANGE_WIDTH);
	ieee80211_recalc_chanctx_min_def(local, ctx);

	if (!local->use_chanctx) {
		local->_oper_chandef = *chandef;
		ieee80211_hw_config(local, 0);
	}
}

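/*
 * Look for an existing, shareable channel context that is compatible
 * with the requested chandef; if one is found, reconfigure it to the
 * common compatible chandef and return it.
 */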
static struct ieee80211_chanctx *
ieee80211_find_chanctx(struct ieee80211_local *local,
		       const struct cfg80211_chan_def *chandef,
		       enum ieee80211_chanctx_mode mode)
{
	struct ieee80211_chanctx *ctx;

	lockdep_assert_held(&local->chanctx_mtx);

	if (mode == IEEE80211_CHANCTX_EXCLUSIVE)
		return NULL;

	list_for_each_entry(ctx, &local->chanctx_list, list) {
		const struct cfg80211_chan_def *compat;

		if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE)
			continue;

		compat = cfg80211_chandef_compatible(&ctx->conf.def, chandef);
		if (!compat)
			continue;

		ieee80211_change_chanctx(local, ctx, compat);

		return ctx;
	}

	return NULL;
}

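/* Return true if any running interface requires radar detection. */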
static bool ieee80211_is_radar_required(struct ieee80211_local *local)
{
	struct ieee80211_sub_if_data *sdata;

	lockdep_assert_held(&local->mtx);

	rcu_read_lock();
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (sdata->radar_required) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}

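/*
 * Allocate and register a new channel context for the given chandef,
 * turning idle off before the driver is asked to add it.
 */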
static struct ieee80211_chanctx *
ieee80211_new_chanctx(struct ieee80211_local *local,
		      const struct cfg80211_chan_def *chandef,
		      enum ieee80211_chanctx_mode mode)
{
	struct ieee80211_chanctx *ctx;
	u32 changed;
	int err;

	lockdep_assert_held(&local->chanctx_mtx);

	ctx = kzalloc(sizeof(*ctx) + local->hw.chanctx_data_size, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->conf.def = *chandef;
	ctx->conf.rx_chains_static = 1;
	ctx->conf.rx_chains_dynamic = 1;
	ctx->mode = mode;
	ctx->conf.radar_enabled = ieee80211_is_radar_required(local);
	ieee80211_recalc_chanctx_min_def(local, ctx);
	if (!local->use_chanctx)
		local->hw.conf.radar_enabled = ctx->conf.radar_enabled;

	/* we hold the mutex to prevent idle from changing */
	lockdep_assert_held(&local->mtx);
	/* turn idle off *before* setting channel -- some drivers need that */
	changed = ieee80211_idle_off(local);
	if (changed)
		ieee80211_hw_config(local, changed);

	if (!local->use_chanctx) {
		local->_oper_chandef = *chandef;
		ieee80211_hw_config(local, 0);
	} else {
		err = drv_add_chanctx(local, ctx);
		if (err) {
			kfree(ctx);
			ieee80211_recalc_idle(local);
			return ERR_PTR(err);
		}
	}

	/* and keep the mutex held until the new chanctx is on the list */
	list_add_rcu(&ctx->list, &local->chanctx_list);

	return ctx;
}

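/*
 * Remove an unused channel context from the driver and the list and
 * free it; its refcount must already have dropped to zero.
 */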
static void ieee80211_free_chanctx(struct ieee80211_local *local,
				   struct ieee80211_chanctx *ctx)
{
	bool check_single_channel = false;

	lockdep_assert_held(&local->chanctx_mtx);

	WARN_ON_ONCE(ctx->refcount != 0);

	if (!local->use_chanctx) {
		struct cfg80211_chan_def *chandef = &local->_oper_chandef;

		chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
		chandef->center_freq1 = chandef->chan->center_freq;
		chandef->center_freq2 = 0;

		/* NOTE: Disabling radar is only valid here for
		 * single channel context. To be sure, check it ...
		 */
		if (local->hw.conf.radar_enabled)
			check_single_channel = true;
		local->hw.conf.radar_enabled = false;

		ieee80211_hw_config(local, 0);
	} else {
		drv_remove_chanctx(local, ctx);
	}

	list_del_rcu(&ctx->list);
	kfree_rcu(ctx, rcu_head);

	/* throw a warning if this wasn't the only channel context. */
	WARN_ON(check_single_channel && !list_empty(&local->chanctx_list));

	ieee80211_recalc_idle(local);
}

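/*
 * Bind an interface to a channel context: tell the driver, bump the
 * refcount and recalculate tx power, min width and idle state.
 */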
static int ieee80211_assign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
					struct ieee80211_chanctx *ctx)
{
	struct ieee80211_local *local = sdata->local;
	int ret;

	lockdep_assert_held(&local->chanctx_mtx);

	ret = drv_assign_vif_chanctx(local, sdata, ctx);
	if (ret)
		return ret;

	rcu_assign_pointer(sdata->vif.chanctx_conf, &ctx->conf);
	ctx->refcount++;

	ieee80211_recalc_txpower(sdata);
	ieee80211_recalc_chanctx_min_def(local, ctx);
	sdata->vif.bss_conf.idle = false;

	if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
	    sdata->vif.type != NL80211_IFTYPE_MONITOR)
		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);

	return 0;
}

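/*
 * Recompute a chandef that is compatible with all interfaces bound to
 * this context and switch the context to it.
 */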
static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
					      struct ieee80211_chanctx *ctx)
{
	struct ieee80211_chanctx_conf *conf = &ctx->conf;
	struct ieee80211_sub_if_data *sdata;
	const struct cfg80211_chan_def *compat = NULL;

	lockdep_assert_held(&local->chanctx_mtx);

	rcu_read_lock();
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (!ieee80211_sdata_running(sdata))
			continue;
		if (rcu_access_pointer(sdata->vif.chanctx_conf) != conf)
			continue;

		if (!compat)
			compat = &sdata->vif.bss_conf.chandef;

		compat = cfg80211_chandef_compatible(
				&sdata->vif.bss_conf.chandef, compat);
		if (!compat)
			break;
	}
	rcu_read_unlock();

	if (WARN_ON_ONCE(!compat))
		return;

	ieee80211_change_chanctx(local, ctx, compat);
}

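/*
 * Re-evaluate whether radar detection is needed on this context and
 * propagate the result to the driver and hardware configuration.
 */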
static void ieee80211_recalc_radar_chanctx(struct ieee80211_local *local,
					   struct ieee80211_chanctx *chanctx)
{
	bool radar_enabled;

	lockdep_assert_held(&local->chanctx_mtx);
	/* for setting local->radar_detect_enabled */
	lockdep_assert_held(&local->mtx);

	radar_enabled = ieee80211_is_radar_required(local);

	if (radar_enabled == chanctx->conf.radar_enabled)
		return;

	chanctx->conf.radar_enabled = radar_enabled;
	local->radar_detect_enabled = chanctx->conf.radar_enabled;

	if (!local->use_chanctx) {
		local->hw.conf.radar_enabled = chanctx->conf.radar_enabled;
		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
	}

	drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RADAR);
}

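/*
 * Unbind an interface from its channel context and, if the context is
 * still in use, recalculate its chandef, SMPS, radar and min width.
 */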
static void ieee80211_unassign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
					   struct ieee80211_chanctx *ctx)
{
	struct ieee80211_local *local = sdata->local;

	lockdep_assert_held(&local->chanctx_mtx);

	ctx->refcount--;
	rcu_assign_pointer(sdata->vif.chanctx_conf, NULL);

	sdata->vif.bss_conf.idle = true;

	if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
	    sdata->vif.type != NL80211_IFTYPE_MONITOR)
		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);

	drv_unassign_vif_chanctx(local, sdata, ctx);

	if (ctx->refcount > 0) {
		ieee80211_recalc_chanctx_chantype(sdata->local, ctx);
		ieee80211_recalc_smps_chanctx(local, ctx);
		ieee80211_recalc_radar_chanctx(local, ctx);
		ieee80211_recalc_chanctx_min_def(local, ctx);
	}
}

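/*
 * Release the interface's channel context and free it if this was the
 * last user; the chanctx mutex must already be held.
 */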
static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_chanctx_conf *conf;
	struct ieee80211_chanctx *ctx;

	lockdep_assert_held(&local->chanctx_mtx);

	conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
					 lockdep_is_held(&local->chanctx_mtx));
	if (!conf)
		return;

	ctx = container_of(conf, struct ieee80211_chanctx, conf);

	ieee80211_unassign_vif_chanctx(sdata, ctx);
	if (ctx->refcount == 0)
		ieee80211_free_chanctx(local, ctx);
}

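/*
 * Recompute the static/dynamic RX chain requirements of all interfaces
 * on this context and push the result to the driver if it changed.
 */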
void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
				   struct ieee80211_chanctx *chanctx)
{
	struct ieee80211_sub_if_data *sdata;
	u8 rx_chains_static, rx_chains_dynamic;

	lockdep_assert_held(&local->chanctx_mtx);

	rx_chains_static = 1;
	rx_chains_dynamic = 1;

	rcu_read_lock();
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		u8 needed_static, needed_dynamic;

		if (!ieee80211_sdata_running(sdata))
			continue;

		if (rcu_access_pointer(sdata->vif.chanctx_conf) !=
						&chanctx->conf)
			continue;

		switch (sdata->vif.type) {
		case NL80211_IFTYPE_P2P_DEVICE:
			continue;
		case NL80211_IFTYPE_STATION:
			if (!sdata->u.mgd.associated)
				continue;
			break;
		case NL80211_IFTYPE_AP_VLAN:
			continue;
		case NL80211_IFTYPE_AP:
		case NL80211_IFTYPE_ADHOC:
		case NL80211_IFTYPE_WDS:
		case NL80211_IFTYPE_MESH_POINT:
			break;
		default:
			WARN_ON_ONCE(1);
		}

		switch (sdata->smps_mode) {
		default:
			WARN_ONCE(1, "Invalid SMPS mode %d\n",
				  sdata->smps_mode);
			/* fall through */
		case IEEE80211_SMPS_OFF:
			needed_static = sdata->needed_rx_chains;
			needed_dynamic = sdata->needed_rx_chains;
			break;
		case IEEE80211_SMPS_DYNAMIC:
			needed_static = 1;
			needed_dynamic = sdata->needed_rx_chains;
			break;
		case IEEE80211_SMPS_STATIC:
			needed_static = 1;
			needed_dynamic = 1;
			break;
		}

		rx_chains_static = max(rx_chains_static, needed_static);
		rx_chains_dynamic = max(rx_chains_dynamic, needed_dynamic);
	}

	/* Disable SMPS for the monitor interface */
	sdata = rcu_dereference(local->monitor_sdata);
	if (sdata &&
	    rcu_access_pointer(sdata->vif.chanctx_conf) == &chanctx->conf)
		rx_chains_dynamic = rx_chains_static = local->rx_chains;

	rcu_read_unlock();

	if (!local->use_chanctx) {
		if (rx_chains_static > 1)
			local->smps_mode = IEEE80211_SMPS_OFF;
		else if (rx_chains_dynamic > 1)
			local->smps_mode = IEEE80211_SMPS_DYNAMIC;
		else
			local->smps_mode = IEEE80211_SMPS_STATIC;
		ieee80211_hw_config(local, 0);
	}

	if (rx_chains_static == chanctx->conf.rx_chains_static &&
	    rx_chains_dynamic == chanctx->conf.rx_chains_dynamic)
		return;

	chanctx->conf.rx_chains_static = rx_chains_static;
	chanctx->conf.rx_chains_dynamic = rx_chains_dynamic;
	drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RX_CHAINS);
}

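/*
 * Attach an interface to a (new or shared) channel context for the
 * given chandef, releasing any context it previously used.
 */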
int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
			      const struct cfg80211_chan_def *chandef,
			      enum ieee80211_chanctx_mode mode)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_chanctx *ctx;
	int ret;

	lockdep_assert_held(&local->mtx);

	WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev));

	mutex_lock(&local->chanctx_mtx);
	__ieee80211_vif_release_channel(sdata);

	ctx = ieee80211_find_chanctx(local, chandef, mode);
	if (!ctx)
		ctx = ieee80211_new_chanctx(local, chandef, mode);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto out;
	}

	sdata->vif.bss_conf.chandef = *chandef;

	ret = ieee80211_assign_vif_chanctx(sdata, ctx);
	if (ret) {
		/* if assign fails refcount stays the same */
		if (ctx->refcount == 0)
			ieee80211_free_chanctx(local, ctx);
		goto out;
	}

	ieee80211_recalc_smps_chanctx(local, ctx);
	ieee80211_recalc_radar_chanctx(local, ctx);
 out:
	mutex_unlock(&local->chanctx_mtx);
	return ret;
}

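/*
 * Move an interface to its CSA target chandef (sdata->csa_chandef);
 * only valid during a channel switch and only for a context with a
 * single user.
 */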
int ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
				 u32 *changed)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_chanctx_conf *conf;
	struct ieee80211_chanctx *ctx;
	const struct cfg80211_chan_def *chandef = &sdata->csa_chandef;
	int ret;
	u32 chanctx_changed = 0;

	lockdep_assert_held(&local->mtx);

	/* should never be called if not performing a channel switch. */
	if (WARN_ON(!sdata->vif.csa_active))
		return -EINVAL;

	if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
				     IEEE80211_CHAN_DISABLED))
		return -EINVAL;

	mutex_lock(&local->chanctx_mtx);
	conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
					 lockdep_is_held(&local->chanctx_mtx));
	if (!conf) {
		ret = -EINVAL;
		goto out;
	}

	ctx = container_of(conf, struct ieee80211_chanctx, conf);
	if (ctx->refcount != 1) {
		ret = -EINVAL;
		goto out;
	}

	if (sdata->vif.bss_conf.chandef.width != chandef->width) {
		chanctx_changed = IEEE80211_CHANCTX_CHANGE_WIDTH;
		*changed |= BSS_CHANGED_BANDWIDTH;
	}

	sdata->vif.bss_conf.chandef = *chandef;
	ctx->conf.def = *chandef;

	chanctx_changed |= IEEE80211_CHANCTX_CHANGE_CHANNEL;
	drv_change_chanctx(local, ctx, chanctx_changed);

	ieee80211_recalc_chanctx_chantype(local, ctx);
	ieee80211_recalc_smps_chanctx(local, ctx);
	ieee80211_recalc_radar_chanctx(local, ctx);
	ieee80211_recalc_chanctx_min_def(local, ctx);

	ret = 0;
 out:
	mutex_unlock(&local->chanctx_mtx);
	return ret;
}

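/*
 * Change only the bandwidth of an interface's chandef; the new chandef
 * must remain compatible with the channel context it is bound to.
 */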
int ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
				   const struct cfg80211_chan_def *chandef,
				   u32 *changed)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_chanctx_conf *conf;
	struct ieee80211_chanctx *ctx;
	int ret;

	if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
				     IEEE80211_CHAN_DISABLED))
		return -EINVAL;

	mutex_lock(&local->chanctx_mtx);
	if (cfg80211_chandef_identical(chandef, &sdata->vif.bss_conf.chandef)) {
		ret = 0;
		goto out;
	}

	if (chandef->width == NL80211_CHAN_WIDTH_20_NOHT ||
	    sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT) {
		ret = -EINVAL;
		goto out;
	}

	conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
					 lockdep_is_held(&local->chanctx_mtx));
	if (!conf) {
		ret = -EINVAL;
		goto out;
	}

	ctx = container_of(conf, struct ieee80211_chanctx, conf);
	if (!cfg80211_chandef_compatible(&conf->def, chandef)) {
		ret = -EINVAL;
		goto out;
	}

	sdata->vif.bss_conf.chandef = *chandef;

	ieee80211_recalc_chanctx_chantype(local, ctx);

	*changed |= BSS_CHANGED_BANDWIDTH;
	ret = 0;
 out:
	mutex_unlock(&local->chanctx_mtx);
	return ret;
}

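/*
 * Release the interface's channel context, taking the chanctx mutex;
 * local->mtx must be held by the caller.
 */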
void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
{
	WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev));

	lockdep_assert_held(&sdata->local->mtx);

	mutex_lock(&sdata->local->chanctx_mtx);
	__ieee80211_vif_release_channel(sdata);
	mutex_unlock(&sdata->local->chanctx_mtx);
}

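/* Point an AP_VLAN interface at the channel context of its AP. */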
void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_sub_if_data *ap;
	struct ieee80211_chanctx_conf *conf;

	if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->bss))
		return;

	ap = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap);

	mutex_lock(&local->chanctx_mtx);

	conf = rcu_dereference_protected(ap->vif.chanctx_conf,
					 lockdep_is_held(&local->chanctx_mtx));
	rcu_assign_pointer(sdata->vif.chanctx_conf, conf);
	mutex_unlock(&local->chanctx_mtx);
}

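/*
 * Copy the AP's channel context pointer to all of its VLAN interfaces,
 * or clear it on all of them when @clear is true.
 */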
void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
					 bool clear)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_sub_if_data *vlan;
	struct ieee80211_chanctx_conf *conf;

	ASSERT_RTNL();

	if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP))
		return;

	mutex_lock(&local->chanctx_mtx);

	/*
	 * Check that conf exists, even when clearing. This function
	 * must be called with the AP's channel context still there,
	 * as it would otherwise cause VLANs to have an invalid
	 * channel context pointer for a while, possibly pointing
	 * to a channel context that has already been freed.
	 */
	conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
					 lockdep_is_held(&local->chanctx_mtx));
	WARN_ON(!conf);

	if (clear)
		conf = NULL;

	list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
		rcu_assign_pointer(vlan->vif.chanctx_conf, conf);

	mutex_unlock(&local->chanctx_mtx);
}

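/*
 * Iterate all channel contexts that the driver knows about, in atomic
 * (RCU) context. A minimal usage sketch (the callback name below is
 * illustrative only, not part of this file):
 *
 *	static void my_chanctx_iter(struct ieee80211_hw *hw,
 *				    struct ieee80211_chanctx_conf *conf,
 *				    void *data)
 *	{
 *		// inspect conf->def, conf->min_def, conf->radar_enabled ...
 *	}
 *
 *	ieee80211_iter_chan_contexts_atomic(hw, my_chanctx_iter, NULL);
 */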
void ieee80211_iter_chan_contexts_atomic(
	struct ieee80211_hw *hw,
	void (*iter)(struct ieee80211_hw *hw,
		     struct ieee80211_chanctx_conf *chanctx_conf,
		     void *data),
	void *iter_data)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_chanctx *ctx;

	rcu_read_lock();
	list_for_each_entry_rcu(ctx, &local->chanctx_list, list)
		if (ctx->driver_present)
			iter(hw, &ctx->conf, iter_data);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ieee80211_iter_chan_contexts_atomic);