/*
 * Copyright (c) 2014, Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "hw.h"
#include "dynack.h"

#define COMPUTE_TO	(5 * HZ)
#define LATEACK_DELAY	(10 * HZ)
#define LATEACK_TO	256
#define MAX_DELAY	300
#define EWMA_LEVEL	96
#define EWMA_DIV	128

/**
 * ath_dynack_ewma - EWMA (Exponentially Weighted Moving Average) calculation
 * @old: previous average value
 * @new: latest sample
 */
static inline u32 ath_dynack_ewma(u32 old, u32 new)
{
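	/*
	 * Weighting note: EWMA_LEVEL / EWMA_DIV = 96 / 128, so the result
	 * keeps roughly 75% of the previous average and takes 25% from the
	 * new sample.
	 */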
	return (new * (EWMA_DIV - EWMA_LEVEL) + old * EWMA_LEVEL) / EWMA_DIV;
}

/**
 * ath_dynack_get_sifs - get SIFS time based on the PHY in use
 * @ah: ath hw
 * @phy: phy used
 *
 */
static inline u32 ath_dynack_get_sifs(struct ath_hw *ah, int phy)
{
	u32 sifs = CCK_SIFS_TIME;

	if (phy == WLAN_RC_PHY_OFDM) {
		if (IS_CHAN_QUARTER_RATE(ah->curchan))
			sifs = OFDM_SIFS_TIME_QUARTER;
		else if (IS_CHAN_HALF_RATE(ah->curchan))
			sifs = OFDM_SIFS_TIME_HALF;
		else
			sifs = OFDM_SIFS_TIME;
	}
	return sifs;
}

/**
 * ath_dynack_bssidmask - check if the ACK receiver address matches our BSSID mask
 * @ah: ath hw
 * @mac: receiver address
 */
static inline bool ath_dynack_bssidmask(struct ath_hw *ah, const u8 *mac)
{
	int i;
	struct ath_common *common = ath9k_hw_common(ah);

	for (i = 0; i < ETH_ALEN; i++) {
		if ((common->macaddr[i] & common->bssidmask[i]) !=
		    (mac[i] & common->bssidmask[i]))
			return false;
	}

	return true;
}

/**
 * ath_dynack_compute_ackto - compute ACK timeout as the maximum STA timeout
 * @ah: ath hw
 *
 * should be called while holding qlock
 */
static void ath_dynack_compute_ackto(struct ath_hw *ah)
{
	struct ath_node *an;
	u32 to = 0;
	struct ath_dynack *da = &ah->dynack;
	struct ath_common *common = ath9k_hw_common(ah);

	list_for_each_entry(an, &da->nodes, list)
		if (an->ackto > to)
			to = an->ackto;

	if (to && da->ackto != to) {
		u32 slottime;

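		/*
		 * Convention used throughout this file: the slot time is
		 * derived from the ACK timeout as (ackto - 3) / 2, i.e.
		 * ackto ~= 2 * slottime + 3 (see also ath_dynack_reset()).
		 */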
		slottime = (to - 3) / 2;
		da->ackto = to;
		ath_dbg(common, DYNACK, "ACK timeout %u slottime %u\n",
			da->ackto, slottime);
		ath9k_hw_setslottime(ah, slottime);
		ath9k_hw_set_ack_timeout(ah, da->ackto);
		ath9k_hw_set_cts_timeout(ah, da->ackto);
	}
}

/**
 * ath_dynack_compute_to - compute STA ACK timeout
 * @ah: ath hw
 *
 * should be called while holding qlock
 */
static void ath_dynack_compute_to(struct ath_hw *ah)
{
	u32 ackto, ack_ts;
	u8 *dst, *src;
	struct ieee80211_sta *sta;
	struct ath_node *an;
	struct ts_info *st_ts;
	struct ath_dynack *da = &ah->dynack;

	rcu_read_lock();

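	/*
	 * Walk both ring buffers: pair each tx status sample with the first
	 * ACK timestamp recorded after the end of that transmission. The gap
	 * between the two is one ACK timeout sample for the destination
	 * station; ACK timestamps that predate the tx completion are dropped.
	 */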
	while (da->st_rbf.h_rb != da->st_rbf.t_rb &&
	       da->ack_rbf.h_rb != da->ack_rbf.t_rb) {
		ack_ts = da->ack_rbf.tstamp[da->ack_rbf.h_rb];
		st_ts = &da->st_rbf.ts[da->st_rbf.h_rb];
		dst = da->st_rbf.addr[da->st_rbf.h_rb].h_dest;
		src = da->st_rbf.addr[da->st_rbf.h_rb].h_src;

		ath_dbg(ath9k_hw_common(ah), DYNACK,
			"ack_ts %u st_ts %u st_dur %u [%u-%u]\n",
			ack_ts, st_ts->tstamp, st_ts->dur,
			da->ack_rbf.h_rb, da->st_rbf.h_rb);

		if (ack_ts > st_ts->tstamp + st_ts->dur) {
			ackto = ack_ts - st_ts->tstamp - st_ts->dur;

			if (ackto < MAX_DELAY) {
				sta = ieee80211_find_sta_by_ifaddr(ah->hw, dst,
								   src);
				if (sta) {
					an = (struct ath_node *)sta->drv_priv;
					an->ackto = ath_dynack_ewma(an->ackto,
								    ackto);
					ath_dbg(ath9k_hw_common(ah), DYNACK,
						"%pM to %u\n", dst, an->ackto);
					if (time_is_before_jiffies(da->lto)) {
						ath_dynack_compute_ackto(ah);
						da->lto = jiffies + COMPUTE_TO;
					}
				}
				INCR(da->ack_rbf.h_rb, ATH_DYN_BUF);
			}
			INCR(da->st_rbf.h_rb, ATH_DYN_BUF);
		} else {
			INCR(da->ack_rbf.h_rb, ATH_DYN_BUF);
		}
	}

	rcu_read_unlock();
}

/**
 * ath_dynack_sample_tx_ts - status timestamp sampling method
 * @ah: ath hw
 * @skb: socket buffer
 * @ts: tx status info
 *
 */
void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
			     struct ath_tx_status *ts)
{
	u8 ridx;
	struct ieee80211_hdr *hdr;
	struct ath_dynack *da = &ah->dynack;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	if ((info->flags & IEEE80211_TX_CTL_NO_ACK) || !da->enabled)
		return;

	spin_lock_bh(&da->qlock);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* late ACK: if an association frame sees excessive retries, fall
	 * back to a conservative ACK/CTS timeout and defer the next
	 * recomputation by LATEACK_DELAY
	 */
	if (ts->ts_status & ATH9K_TXERR_XRETRY) {
		if (ieee80211_is_assoc_req(hdr->frame_control) ||
		    ieee80211_is_assoc_resp(hdr->frame_control)) {
			ath_dbg(common, DYNACK, "late ack\n");
			ath9k_hw_setslottime(ah, (LATEACK_TO - 3) / 2);
			ath9k_hw_set_ack_timeout(ah, LATEACK_TO);
			ath9k_hw_set_cts_timeout(ah, LATEACK_TO);
			da->lto = jiffies + LATEACK_DELAY;
		}

		spin_unlock_bh(&da->qlock);
		return;
	}

	ridx = ts->ts_rateindex;

	da->st_rbf.ts[da->st_rbf.t_rb].tstamp = ts->ts_tstamp;
	da->st_rbf.ts[da->st_rbf.t_rb].dur = ts->duration;
	ether_addr_copy(da->st_rbf.addr[da->st_rbf.t_rb].h_dest, hdr->addr1);
	ether_addr_copy(da->st_rbf.addr[da->st_rbf.t_rb].h_src, hdr->addr2);

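	/*
	 * For legacy (non-MCS) rates, remove the SIFS of the PHY in use from
	 * the sampled duration; the assumption here is that the reported
	 * duration accounts for it, while dynack only needs the frame
	 * airtime.
	 */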
	if (!(info->status.rates[ridx].flags & IEEE80211_TX_RC_MCS)) {
		u32 phy, sifs;
		const struct ieee80211_rate *rate;
		struct ieee80211_tx_rate *rates = info->status.rates;

		rate = &common->sbands[info->band].bitrates[rates[ridx].idx];
		if (info->band == NL80211_BAND_2GHZ &&
		    !(rate->flags & IEEE80211_RATE_ERP_G))
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		sifs = ath_dynack_get_sifs(ah, phy);
		da->st_rbf.ts[da->st_rbf.t_rb].dur -= sifs;
	}

	ath_dbg(common, DYNACK, "{%pM} tx sample %u [dur %u][h %u-t %u]\n",
		hdr->addr1, da->st_rbf.ts[da->st_rbf.t_rb].tstamp,
		da->st_rbf.ts[da->st_rbf.t_rb].dur, da->st_rbf.h_rb,
		(da->st_rbf.t_rb + 1) % ATH_DYN_BUF);

	INCR(da->st_rbf.t_rb, ATH_DYN_BUF);
	if (da->st_rbf.t_rb == da->st_rbf.h_rb)
		INCR(da->st_rbf.h_rb, ATH_DYN_BUF);

	ath_dynack_compute_to(ah);

	spin_unlock_bh(&da->qlock);
}
EXPORT_SYMBOL(ath_dynack_sample_tx_ts);

/**
 * ath_dynack_sample_ack_ts - ACK timestamp sampling method
 * @ah: ath hw
 * @skb: socket buffer
 * @ts: rx timestamp
 *
 */
void ath_dynack_sample_ack_ts(struct ath_hw *ah, struct sk_buff *skb,
			      u32 ts)
{
	struct ath_dynack *da = &ah->dynack;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!ath_dynack_bssidmask(ah, hdr->addr1) || !da->enabled)
		return;

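	/*
	 * Queue the rx timestamp of an ACK addressed to us; it is matched
	 * against the tx status samples in ath_dynack_compute_to().
	 */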
	spin_lock_bh(&da->qlock);
	da->ack_rbf.tstamp[da->ack_rbf.t_rb] = ts;

	ath_dbg(common, DYNACK, "rx sample %u [h %u-t %u]\n",
		da->ack_rbf.tstamp[da->ack_rbf.t_rb],
		da->ack_rbf.h_rb, (da->ack_rbf.t_rb + 1) % ATH_DYN_BUF);

	INCR(da->ack_rbf.t_rb, ATH_DYN_BUF);
	if (da->ack_rbf.t_rb == da->ack_rbf.h_rb)
		INCR(da->ack_rbf.h_rb, ATH_DYN_BUF);

	ath_dynack_compute_to(ah);

	spin_unlock_bh(&da->qlock);
}
EXPORT_SYMBOL(ath_dynack_sample_ack_ts);

/**
 * ath_dynack_node_init - init ath_node related info
 * @ah: ath hw
 * @an: ath node
 *
 */
void ath_dynack_node_init(struct ath_hw *ah, struct ath_node *an)
{
	/* ackto = slottime + sifs + air delay */
	u32 ackto = 9 + 16 + 64;
	struct ath_dynack *da = &ah->dynack;

	an->ackto = ackto;

	spin_lock(&da->qlock);
	list_add_tail(&an->list, &da->nodes);
	spin_unlock(&da->qlock);
}
EXPORT_SYMBOL(ath_dynack_node_init);

/**
 * ath_dynack_node_deinit - deinit ath_node related info
 * @ah: ath hw
 * @an: ath node
 *
 */
void ath_dynack_node_deinit(struct ath_hw *ah, struct ath_node *an)
{
	struct ath_dynack *da = &ah->dynack;

	spin_lock(&da->qlock);
	list_del(&an->list);
	spin_unlock(&da->qlock);
}
EXPORT_SYMBOL(ath_dynack_node_deinit);

/**
 * ath_dynack_reset - reset dynack processing
 * @ah: ath hw
 *
 */
void ath_dynack_reset(struct ath_hw *ah)
{
	/* ackto = slottime + sifs + air delay */
	u32 ackto = 9 + 16 + 64;
	struct ath_dynack *da = &ah->dynack;

	da->lto = jiffies;
	da->ackto = ackto;

	da->st_rbf.t_rb = 0;
	da->st_rbf.h_rb = 0;
	da->ack_rbf.t_rb = 0;
	da->ack_rbf.h_rb = 0;

	/* init acktimeout */
	ath9k_hw_setslottime(ah, (ackto - 3) / 2);
	ath9k_hw_set_ack_timeout(ah, ackto);
	ath9k_hw_set_cts_timeout(ah, ackto);
}
EXPORT_SYMBOL(ath_dynack_reset);

/**
 * ath_dynack_init - init dynack data structure
 * @ah: ath hw
 *
 */
void ath_dynack_init(struct ath_hw *ah)
{
	struct ath_dynack *da = &ah->dynack;

	memset(da, 0, sizeof(struct ath_dynack));

	spin_lock_init(&da->qlock);
	INIT_LIST_HEAD(&da->nodes);

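	/* advertise ACK timeout estimation support to userspace */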
	ah->hw->wiphy->features |= NL80211_FEATURE_ACKTO_ESTIMATION;
}