/*
 * mac80211 - channel management
 */

#include <linux/nl80211.h>
#include <linux/export.h>
#include <linux/rtnetlink.h>
#include <net/cfg80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"

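/*
 * Map a station's RX bandwidth to the narrowest nl80211 channel width
 * that can carry it. Non-HT stations map to 20 MHz no-HT; both 160 and
 * 80+80 map to NL80211_CHAN_WIDTH_160 (see the comment in the 160 MHz
 * case below).
 */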
static enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta)
{
        switch (sta->bandwidth) {
        case IEEE80211_STA_RX_BW_20:
                if (sta->ht_cap.ht_supported)
                        return NL80211_CHAN_WIDTH_20;
                else
                        return NL80211_CHAN_WIDTH_20_NOHT;
        case IEEE80211_STA_RX_BW_40:
                return NL80211_CHAN_WIDTH_40;
        case IEEE80211_STA_RX_BW_80:
                return NL80211_CHAN_WIDTH_80;
        case IEEE80211_STA_RX_BW_160:
                /*
                 * This applies to both 160 and 80+80. Since we use
                 * the returned value to consider degradation of
                 * ctx->conf.min_def, we have to make sure to take
                 * the bigger one (NL80211_CHAN_WIDTH_160).
                 * Otherwise we might try degrading even when not
                 * needed, as the max required sta_bw returned (80+80)
                 * might be smaller than the configured bw (160).
                 */
                return NL80211_CHAN_WIDTH_160;
        default:
                WARN_ON(1);
                return NL80211_CHAN_WIDTH_20;
        }
}

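/*
 * Return the widest channel width required by any uploaded station that
 * belongs to this interface (or, for AP/AP_VLAN, to the same BSS).
 * Stations not yet uploaded to the driver are ignored.
 */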
static enum nl80211_chan_width
ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata)
{
        enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;
        struct sta_info *sta;

        rcu_read_lock();
        list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
                if (sdata != sta->sdata &&
                    !(sta->sdata->bss && sta->sdata->bss == sdata->bss))
                        continue;

                if (!sta->uploaded)
                        continue;

                max_bw = max(max_bw, ieee80211_get_sta_bw(&sta->sta));
        }
        rcu_read_unlock();

        return max_bw;
}

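/*
 * Return the widest channel width required by any interface bound to the
 * given channel context: AP/AP_VLAN interfaces contribute the requirements
 * of their stations, station/IBSS/WDS/mesh interfaces contribute their own
 * configured chandef, and a monitor interface keeps the context at its
 * configured width.
 */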
static enum nl80211_chan_width
ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
                                      struct ieee80211_chanctx_conf *conf)
{
        struct ieee80211_sub_if_data *sdata;
        enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;

        rcu_read_lock();
        list_for_each_entry_rcu(sdata, &local->interfaces, list) {
                struct ieee80211_vif *vif = &sdata->vif;
                enum nl80211_chan_width width = NL80211_CHAN_WIDTH_20_NOHT;

                if (!ieee80211_sdata_running(sdata))
                        continue;

                if (rcu_access_pointer(sdata->vif.chanctx_conf) != conf)
                        continue;

                switch (vif->type) {
                case NL80211_IFTYPE_AP:
                case NL80211_IFTYPE_AP_VLAN:
                        width = ieee80211_get_max_required_bw(sdata);
                        break;
                case NL80211_IFTYPE_P2P_DEVICE:
                        continue;
                case NL80211_IFTYPE_STATION:
                case NL80211_IFTYPE_ADHOC:
                case NL80211_IFTYPE_WDS:
                case NL80211_IFTYPE_MESH_POINT:
                        width = vif->bss_conf.chandef.width;
                        break;
                case NL80211_IFTYPE_UNSPECIFIED:
                case NUM_NL80211_IFTYPES:
                case NL80211_IFTYPE_MONITOR:
                case NL80211_IFTYPE_P2P_CLIENT:
                case NL80211_IFTYPE_P2P_GO:
                        WARN_ON_ONCE(1);
                }
                max_bw = max(max_bw, width);
        }

        /* use the configured bandwidth in case of monitor interface */
        sdata = rcu_dereference(local->monitor_sdata);
        if (sdata && rcu_access_pointer(sdata->vif.chanctx_conf) == conf)
                max_bw = max(max_bw, conf->def.width);

        rcu_read_unlock();

        return max_bw;
}

/*
 * recalc the min required chan width of the channel context, which is
 * the max of min required widths of all the interfaces bound to this
 * channel context.
 */
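/*
 * For example (illustration only): if ctx->conf.def is 80 MHz wide but the
 * widest requirement reported by the interfaces bound to the context is
 * 40 MHz, the loop below downgrades min_def from 80 to 40 MHz and the
 * driver is notified with IEEE80211_CHANCTX_CHANGE_MIN_WIDTH.
 */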
void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
                                      struct ieee80211_chanctx *ctx)
{
        enum nl80211_chan_width max_bw;
        struct cfg80211_chan_def min_def;

        lockdep_assert_held(&local->chanctx_mtx);

        /* don't optimize 5MHz, 10MHz, and radar_enabled confs */
        if (ctx->conf.def.width == NL80211_CHAN_WIDTH_5 ||
            ctx->conf.def.width == NL80211_CHAN_WIDTH_10 ||
            ctx->conf.radar_enabled) {
                ctx->conf.min_def = ctx->conf.def;
                return;
        }

        max_bw = ieee80211_get_chanctx_max_required_bw(local, &ctx->conf);

        /* downgrade the chandef down to max_bw */
        min_def = ctx->conf.def;
        while (min_def.width > max_bw)
                ieee80211_chandef_downgrade(&min_def);

        if (cfg80211_chandef_identical(&ctx->conf.min_def, &min_def))
                return;

        ctx->conf.min_def = min_def;
        if (!ctx->driver_present)
                return;

        drv_change_chanctx(local, ctx, IEEE80211_CHANCTX_CHANGE_MIN_WIDTH);
}

static void ieee80211_change_chanctx(struct ieee80211_local *local,
                                     struct ieee80211_chanctx *ctx,
                                     const struct cfg80211_chan_def *chandef)
{
        if (cfg80211_chandef_identical(&ctx->conf.def, chandef))
                return;

        WARN_ON(!cfg80211_chandef_compatible(&ctx->conf.def, chandef));

        ctx->conf.def = *chandef;
        drv_change_chanctx(local, ctx, IEEE80211_CHANCTX_CHANGE_WIDTH);
        ieee80211_recalc_chanctx_min_def(local, ctx);

        if (!local->use_chanctx) {
                local->_oper_chandef = *chandef;
                ieee80211_hw_config(local, 0);
        }
}

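/*
 * Look for an existing, non-exclusive channel context that is compatible
 * with the requested chandef. If one is found, it is first adjusted to the
 * common compatible chandef before being returned; exclusive-mode requests
 * never share a context.
 */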
static struct ieee80211_chanctx *
ieee80211_find_chanctx(struct ieee80211_local *local,
                       const struct cfg80211_chan_def *chandef,
                       enum ieee80211_chanctx_mode mode)
{
        struct ieee80211_chanctx *ctx;

        lockdep_assert_held(&local->chanctx_mtx);

        if (mode == IEEE80211_CHANCTX_EXCLUSIVE)
                return NULL;

        list_for_each_entry(ctx, &local->chanctx_list, list) {
                const struct cfg80211_chan_def *compat;

                if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE)
                        continue;

                compat = cfg80211_chandef_compatible(&ctx->conf.def, chandef);
                if (!compat)
                        continue;

                ieee80211_change_chanctx(local, ctx, compat);

                return ctx;
        }

        return NULL;
}

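/* Return true if any interface currently requires radar detection. */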
static bool ieee80211_is_radar_required(struct ieee80211_local *local)
{
        struct ieee80211_sub_if_data *sdata;

        lockdep_assert_held(&local->mtx);

        rcu_read_lock();
        list_for_each_entry_rcu(sdata, &local->interfaces, list) {
                if (sdata->radar_required) {
                        rcu_read_unlock();
                        return true;
                }
        }
        rcu_read_unlock();

        return false;
}

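/*
 * Allocate and initialize a new channel context. Idle is turned off before
 * the channel is programmed; for drivers without channel-context support
 * the hardware is configured directly instead of calling drv_add_chanctx().
 * The context is added to the list only on success.
 */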
static struct ieee80211_chanctx *
ieee80211_new_chanctx(struct ieee80211_local *local,
                      const struct cfg80211_chan_def *chandef,
                      enum ieee80211_chanctx_mode mode)
{
        struct ieee80211_chanctx *ctx;
        u32 changed;
        int err;

        lockdep_assert_held(&local->chanctx_mtx);

        ctx = kzalloc(sizeof(*ctx) + local->hw.chanctx_data_size, GFP_KERNEL);
        if (!ctx)
                return ERR_PTR(-ENOMEM);

        ctx->conf.def = *chandef;
        ctx->conf.rx_chains_static = 1;
        ctx->conf.rx_chains_dynamic = 1;
        ctx->mode = mode;
        ctx->conf.radar_enabled = ieee80211_is_radar_required(local);
        ieee80211_recalc_chanctx_min_def(local, ctx);
        if (!local->use_chanctx)
                local->hw.conf.radar_enabled = ctx->conf.radar_enabled;

        /* we hold the mutex to prevent idle from changing */
        lockdep_assert_held(&local->mtx);
        /* turn idle off *before* setting channel -- some drivers need that */
        changed = ieee80211_idle_off(local);
        if (changed)
                ieee80211_hw_config(local, changed);

        if (!local->use_chanctx) {
                local->_oper_chandef = *chandef;
                ieee80211_hw_config(local, 0);
        } else {
                err = drv_add_chanctx(local, ctx);
                if (err) {
                        kfree(ctx);
                        ieee80211_recalc_idle(local);
                        return ERR_PTR(err);
                }
        }

        /* and keep the mutex held until the new chanctx is on the list */
        list_add_rcu(&ctx->list, &local->chanctx_list);

        return ctx;
}

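/*
 * Free a channel context that no longer has any users (refcount 0): remove
 * it from the driver (or reset the single operating channel for non-chanctx
 * drivers), unlink it from the list and free it after an RCU grace period,
 * then recalculate the idle state.
 */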
static void ieee80211_free_chanctx(struct ieee80211_local *local,
                                   struct ieee80211_chanctx *ctx)
{
        bool check_single_channel = false;
        lockdep_assert_held(&local->chanctx_mtx);

        WARN_ON_ONCE(ctx->refcount != 0);

        if (!local->use_chanctx) {
                struct cfg80211_chan_def *chandef = &local->_oper_chandef;
                chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
                chandef->center_freq1 = chandef->chan->center_freq;
                chandef->center_freq2 = 0;

                /* NOTE: Disabling radar is only valid here for
                 * single channel context. To be sure, check it ...
                 */
                if (local->hw.conf.radar_enabled)
                        check_single_channel = true;
                local->hw.conf.radar_enabled = false;

                ieee80211_hw_config(local, 0);
        } else {
                drv_remove_chanctx(local, ctx);
        }

        list_del_rcu(&ctx->list);
        kfree_rcu(ctx, rcu_head);

        /* throw a warning if this wasn't the only channel context. */
        WARN_ON(check_single_channel && !list_empty(&local->chanctx_list));

        ieee80211_recalc_idle(local);
}

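/*
 * Bind an interface to a channel context. The driver is asked first, so a
 * failure leaves the refcount and the vif's chanctx pointer untouched; on
 * success the conf pointer is published, the refcount is bumped, and TX
 * power and the minimal width are recalculated.
 */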
static int ieee80211_assign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
                                        struct ieee80211_chanctx *ctx)
{
        struct ieee80211_local *local = sdata->local;
        int ret;

        lockdep_assert_held(&local->chanctx_mtx);

        ret = drv_assign_vif_chanctx(local, sdata, ctx);
        if (ret)
                return ret;

        rcu_assign_pointer(sdata->vif.chanctx_conf, &ctx->conf);
        ctx->refcount++;

        ieee80211_recalc_txpower(sdata);
        ieee80211_recalc_chanctx_min_def(local, ctx);
        sdata->vif.bss_conf.idle = false;

        if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
            sdata->vif.type != NL80211_IFTYPE_MONITOR)
                ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);

        return 0;
}

static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
                                              struct ieee80211_chanctx *ctx)
{
        struct ieee80211_chanctx_conf *conf = &ctx->conf;
        struct ieee80211_sub_if_data *sdata;
        const struct cfg80211_chan_def *compat = NULL;

        lockdep_assert_held(&local->chanctx_mtx);

        rcu_read_lock();
        list_for_each_entry_rcu(sdata, &local->interfaces, list) {

                if (!ieee80211_sdata_running(sdata))
                        continue;
                if (rcu_access_pointer(sdata->vif.chanctx_conf) != conf)
                        continue;

                if (!compat)
                        compat = &sdata->vif.bss_conf.chandef;

                compat = cfg80211_chandef_compatible(
                                &sdata->vif.bss_conf.chandef, compat);
                if (!compat)
                        break;
        }
        rcu_read_unlock();

        if (WARN_ON_ONCE(!compat))
                return;

        ieee80211_change_chanctx(local, ctx, compat);
}

static void ieee80211_recalc_radar_chanctx(struct ieee80211_local *local,
                                           struct ieee80211_chanctx *chanctx)
{
        bool radar_enabled;

        lockdep_assert_held(&local->chanctx_mtx);
        /* for setting local->radar_detect_enabled */
        lockdep_assert_held(&local->mtx);

        radar_enabled = ieee80211_is_radar_required(local);

        if (radar_enabled == chanctx->conf.radar_enabled)
                return;

        chanctx->conf.radar_enabled = radar_enabled;
        local->radar_detect_enabled = chanctx->conf.radar_enabled;

        if (!local->use_chanctx) {
                local->hw.conf.radar_enabled = chanctx->conf.radar_enabled;
                ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
        }

        drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RADAR);
}

static void ieee80211_unassign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
                                           struct ieee80211_chanctx *ctx)
{
        struct ieee80211_local *local = sdata->local;

        lockdep_assert_held(&local->chanctx_mtx);

        ctx->refcount--;
        rcu_assign_pointer(sdata->vif.chanctx_conf, NULL);

        sdata->vif.bss_conf.idle = true;

        if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
            sdata->vif.type != NL80211_IFTYPE_MONITOR)
                ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);

        drv_unassign_vif_chanctx(local, sdata, ctx);

        if (ctx->refcount > 0) {
                ieee80211_recalc_chanctx_chantype(sdata->local, ctx);
                ieee80211_recalc_smps_chanctx(local, ctx);
                ieee80211_recalc_radar_chanctx(local, ctx);
                ieee80211_recalc_chanctx_min_def(local, ctx);
        }
}

static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_chanctx_conf *conf;
        struct ieee80211_chanctx *ctx;

        lockdep_assert_held(&local->chanctx_mtx);

        conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
                                         lockdep_is_held(&local->chanctx_mtx));
        if (!conf)
                return;

        ctx = container_of(conf, struct ieee80211_chanctx, conf);

        ieee80211_unassign_vif_chanctx(sdata, ctx);
        if (ctx->refcount == 0)
                ieee80211_free_chanctx(local, ctx);
}

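/*
 * Recompute the static/dynamic RX chain requirements of a channel context
 * from the SMPS mode and needed_rx_chains of every interface bound to it,
 * and notify the driver only if the result changed.
 */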
void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
                                   struct ieee80211_chanctx *chanctx)
{
        struct ieee80211_sub_if_data *sdata;
        u8 rx_chains_static, rx_chains_dynamic;

        lockdep_assert_held(&local->chanctx_mtx);

        rx_chains_static = 1;
        rx_chains_dynamic = 1;

        rcu_read_lock();
        list_for_each_entry_rcu(sdata, &local->interfaces, list) {
                u8 needed_static, needed_dynamic;

                if (!ieee80211_sdata_running(sdata))
                        continue;

                if (rcu_access_pointer(sdata->vif.chanctx_conf) !=
                                                &chanctx->conf)
                        continue;

                switch (sdata->vif.type) {
                case NL80211_IFTYPE_P2P_DEVICE:
                        continue;
                case NL80211_IFTYPE_STATION:
                        if (!sdata->u.mgd.associated)
                                continue;
                        break;
                case NL80211_IFTYPE_AP_VLAN:
                        continue;
                case NL80211_IFTYPE_AP:
                case NL80211_IFTYPE_ADHOC:
                case NL80211_IFTYPE_WDS:
                case NL80211_IFTYPE_MESH_POINT:
                        break;
                default:
                        WARN_ON_ONCE(1);
                }

                switch (sdata->smps_mode) {
                default:
                        WARN_ONCE(1, "Invalid SMPS mode %d\n",
                                  sdata->smps_mode);
                        /* fall through */
                case IEEE80211_SMPS_OFF:
                        needed_static = sdata->needed_rx_chains;
                        needed_dynamic = sdata->needed_rx_chains;
                        break;
                case IEEE80211_SMPS_DYNAMIC:
                        needed_static = 1;
                        needed_dynamic = sdata->needed_rx_chains;
                        break;
                case IEEE80211_SMPS_STATIC:
                        needed_static = 1;
                        needed_dynamic = 1;
                        break;
                }

                rx_chains_static = max(rx_chains_static, needed_static);
                rx_chains_dynamic = max(rx_chains_dynamic, needed_dynamic);
        }
        rcu_read_unlock();

        if (!local->use_chanctx) {
                if (rx_chains_static > 1)
                        local->smps_mode = IEEE80211_SMPS_OFF;
                else if (rx_chains_dynamic > 1)
                        local->smps_mode = IEEE80211_SMPS_DYNAMIC;
                else
                        local->smps_mode = IEEE80211_SMPS_STATIC;
                ieee80211_hw_config(local, 0);
        }

        if (rx_chains_static == chanctx->conf.rx_chains_static &&
            rx_chains_dynamic == chanctx->conf.rx_chains_dynamic)
                return;

        chanctx->conf.rx_chains_static = rx_chains_static;
        chanctx->conf.rx_chains_dynamic = rx_chains_dynamic;
        drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RX_CHAINS);
}

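/*
 * Attach an interface to a channel context for the given chandef: any
 * previous context is released first, then an existing compatible context
 * is reused or a new one created, and finally the vif is assigned to it and
 * the SMPS and radar state are recalculated.
 */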
int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
                              const struct cfg80211_chan_def *chandef,
                              enum ieee80211_chanctx_mode mode)
{
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_chanctx *ctx;
        int ret;

        lockdep_assert_held(&local->mtx);

        WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev));

        mutex_lock(&local->chanctx_mtx);
        __ieee80211_vif_release_channel(sdata);

        ctx = ieee80211_find_chanctx(local, chandef, mode);
        if (!ctx)
                ctx = ieee80211_new_chanctx(local, chandef, mode);
        if (IS_ERR(ctx)) {
                ret = PTR_ERR(ctx);
                goto out;
        }

        sdata->vif.bss_conf.chandef = *chandef;

        ret = ieee80211_assign_vif_chanctx(sdata, ctx);
        if (ret) {
                /* if assign fails refcount stays the same */
                if (ctx->refcount == 0)
                        ieee80211_free_chanctx(local, ctx);
                goto out;
        }

        ieee80211_recalc_smps_chanctx(local, ctx);
        ieee80211_recalc_radar_chanctx(local, ctx);
 out:
        mutex_unlock(&local->chanctx_mtx);
        return ret;
}

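/*
 * Switch the channel of a vif's context as part of a channel switch (CSA).
 * This is only valid while csa_active is set and while the vif is the sole
 * user of the context; a bandwidth change is reported to the caller through
 * the *changed bitmap.
 */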
int ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
                                 u32 *changed)
{
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_chanctx_conf *conf;
        struct ieee80211_chanctx *ctx;
        const struct cfg80211_chan_def *chandef = &sdata->csa_chandef;
        int ret;
        u32 chanctx_changed = 0;

        lockdep_assert_held(&local->mtx);

        /* should never be called if not performing a channel switch. */
        if (WARN_ON(!sdata->vif.csa_active))
                return -EINVAL;

        if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
                                     IEEE80211_CHAN_DISABLED))
                return -EINVAL;

        mutex_lock(&local->chanctx_mtx);
        conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
                                         lockdep_is_held(&local->chanctx_mtx));
        if (!conf) {
                ret = -EINVAL;
                goto out;
        }

        ctx = container_of(conf, struct ieee80211_chanctx, conf);
        if (ctx->refcount != 1) {
                ret = -EINVAL;
                goto out;
        }

        if (sdata->vif.bss_conf.chandef.width != chandef->width) {
                chanctx_changed = IEEE80211_CHANCTX_CHANGE_WIDTH;
                *changed |= BSS_CHANGED_BANDWIDTH;
        }

        sdata->vif.bss_conf.chandef = *chandef;
        ctx->conf.def = *chandef;

        chanctx_changed |= IEEE80211_CHANCTX_CHANGE_CHANNEL;
        drv_change_chanctx(local, ctx, chanctx_changed);

        ieee80211_recalc_chanctx_chantype(local, ctx);
        ieee80211_recalc_smps_chanctx(local, ctx);
        ieee80211_recalc_radar_chanctx(local, ctx);
        ieee80211_recalc_chanctx_min_def(local, ctx);

        ret = 0;
 out:
        mutex_unlock(&local->chanctx_mtx);
        return ret;
}

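/*
 * Change only the bandwidth of a vif within its existing channel context.
 * The new chandef must be usable, must not move to or from 20 MHz no-HT,
 * and must stay compatible with the context's current chandef; otherwise
 * -EINVAL is returned.
 */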
int ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
                                   const struct cfg80211_chan_def *chandef,
                                   u32 *changed)
{
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_chanctx_conf *conf;
        struct ieee80211_chanctx *ctx;
        int ret;

        if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
                                     IEEE80211_CHAN_DISABLED))
                return -EINVAL;

        mutex_lock(&local->chanctx_mtx);
        if (cfg80211_chandef_identical(chandef, &sdata->vif.bss_conf.chandef)) {
                ret = 0;
                goto out;
        }

        if (chandef->width == NL80211_CHAN_WIDTH_20_NOHT ||
            sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT) {
                ret = -EINVAL;
                goto out;
        }

        conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
                                         lockdep_is_held(&local->chanctx_mtx));
        if (!conf) {
                ret = -EINVAL;
                goto out;
        }

        ctx = container_of(conf, struct ieee80211_chanctx, conf);
        if (!cfg80211_chandef_compatible(&conf->def, chandef)) {
                ret = -EINVAL;
                goto out;
        }

        sdata->vif.bss_conf.chandef = *chandef;

        ieee80211_recalc_chanctx_chantype(local, ctx);

        *changed |= BSS_CHANGED_BANDWIDTH;
        ret = 0;
 out:
        mutex_unlock(&local->chanctx_mtx);
        return ret;
}

void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
{
        WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev));

        lockdep_assert_held(&sdata->local->mtx);

        mutex_lock(&sdata->local->chanctx_mtx);
        __ieee80211_vif_release_channel(sdata);
        mutex_unlock(&sdata->local->chanctx_mtx);
}

void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_sub_if_data *ap;
        struct ieee80211_chanctx_conf *conf;

        if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->bss))
                return;

        ap = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap);

        mutex_lock(&local->chanctx_mtx);

        conf = rcu_dereference_protected(ap->vif.chanctx_conf,
                                         lockdep_is_held(&local->chanctx_mtx));
        rcu_assign_pointer(sdata->vif.chanctx_conf, conf);
        mutex_unlock(&local->chanctx_mtx);
}

void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
                                         bool clear)
{
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_sub_if_data *vlan;
        struct ieee80211_chanctx_conf *conf;

        ASSERT_RTNL();

        if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP))
                return;

        mutex_lock(&local->chanctx_mtx);

        /*
         * Check that conf exists, even when clearing. This function
         * must be called with the AP's channel context still there,
         * as it would otherwise cause VLANs to have an invalid
         * channel context pointer for a while, possibly pointing
         * to a channel context that has already been freed.
         */
        conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
                                         lockdep_is_held(&local->chanctx_mtx));
        WARN_ON(!conf);

        if (clear)
                conf = NULL;

        list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
                rcu_assign_pointer(vlan->vif.chanctx_conf, conf);

        mutex_unlock(&local->chanctx_mtx);
}

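/*
 * Iterate over all channel contexts that have already been added to the
 * driver, in atomic (RCU read-side) context. Contexts that are still being
 * set up and are not yet known to the driver are skipped.
 */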
void ieee80211_iter_chan_contexts_atomic(
        struct ieee80211_hw *hw,
        void (*iter)(struct ieee80211_hw *hw,
                     struct ieee80211_chanctx_conf *chanctx_conf,
                     void *data),
        void *iter_data)
{
        struct ieee80211_local *local = hw_to_local(hw);
        struct ieee80211_chanctx *ctx;

        rcu_read_lock();
        list_for_each_entry_rcu(ctx, &local->chanctx_list, list)
                if (ctx->driver_present)
                        iter(hw, &ctx->conf, iter_data);
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ieee80211_iter_chan_contexts_atomic);