/*
 * UWB DRP IE management.
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/uwb.h>

#include "uwb-internal.h"


/*
 * Return the reason code for a reservation's DRP IE.
 */
int uwb_rsv_reason_code(struct uwb_rsv *rsv)
{
        static const int reason_codes[] = {
                [UWB_RSV_STATE_O_INITIATED]          = UWB_DRP_REASON_ACCEPTED,
                [UWB_RSV_STATE_O_PENDING]            = UWB_DRP_REASON_ACCEPTED,
                [UWB_RSV_STATE_O_MODIFIED]           = UWB_DRP_REASON_MODIFIED,
                [UWB_RSV_STATE_O_ESTABLISHED]        = UWB_DRP_REASON_ACCEPTED,
                [UWB_RSV_STATE_O_TO_BE_MOVED]        = UWB_DRP_REASON_ACCEPTED,
                [UWB_RSV_STATE_O_MOVE_COMBINING]     = UWB_DRP_REASON_MODIFIED,
                [UWB_RSV_STATE_O_MOVE_REDUCING]      = UWB_DRP_REASON_MODIFIED,
                [UWB_RSV_STATE_O_MOVE_EXPANDING]     = UWB_DRP_REASON_ACCEPTED,
                [UWB_RSV_STATE_T_ACCEPTED]           = UWB_DRP_REASON_ACCEPTED,
                [UWB_RSV_STATE_T_CONFLICT]           = UWB_DRP_REASON_CONFLICT,
                [UWB_RSV_STATE_T_PENDING]            = UWB_DRP_REASON_PENDING,
                [UWB_RSV_STATE_T_DENIED]             = UWB_DRP_REASON_DENIED,
                [UWB_RSV_STATE_T_RESIZED]            = UWB_DRP_REASON_ACCEPTED,
                [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED,
                [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT,
                [UWB_RSV_STATE_T_EXPANDING_PENDING]  = UWB_DRP_REASON_PENDING,
                [UWB_RSV_STATE_T_EXPANDING_DENIED]   = UWB_DRP_REASON_DENIED,
        };
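
        /*
         * States without an explicit entry above map to the
         * zero-initialized slots of the array; rsv->state is
         * expected to be one of the listed states whenever a DRP IE
         * is built for it.
         */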
        return reason_codes[rsv->state];
}

/*
 * Return the reason code for a reservation's companion DRP IE.
 */
int uwb_rsv_companion_reason_code(struct uwb_rsv *rsv)
{
        static const int companion_reason_codes[] = {
                [UWB_RSV_STATE_O_MOVE_EXPANDING]     = UWB_DRP_REASON_ACCEPTED,
                [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED,
                [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT,
                [UWB_RSV_STATE_T_EXPANDING_PENDING]  = UWB_DRP_REASON_PENDING,
                [UWB_RSV_STATE_T_EXPANDING_DENIED]   = UWB_DRP_REASON_DENIED,
        };
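
        /*
         * Only reservations being moved or expanded carry a
         * companion DRP IE (see uwb_rsv_has_two_drp_ies()), so only
         * those states need entries here.
         */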
        return companion_reason_codes[rsv->state];
}

/*
 * Return the status bit for a reservation's DRP IE.
 */
int uwb_rsv_status(struct uwb_rsv *rsv)
{
        static const int statuses[] = {
                [UWB_RSV_STATE_O_INITIATED]          = 0,
                [UWB_RSV_STATE_O_PENDING]            = 0,
                [UWB_RSV_STATE_O_MODIFIED]           = 1,
                [UWB_RSV_STATE_O_ESTABLISHED]        = 1,
                [UWB_RSV_STATE_O_TO_BE_MOVED]        = 0,
                [UWB_RSV_STATE_O_MOVE_COMBINING]     = 1,
                [UWB_RSV_STATE_O_MOVE_REDUCING]      = 1,
                [UWB_RSV_STATE_O_MOVE_EXPANDING]     = 1,
                [UWB_RSV_STATE_T_ACCEPTED]           = 1,
                [UWB_RSV_STATE_T_CONFLICT]           = 0,
                [UWB_RSV_STATE_T_PENDING]            = 0,
                [UWB_RSV_STATE_T_DENIED]             = 0,
                [UWB_RSV_STATE_T_RESIZED]            = 1,
                [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1,
                [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 1,
                [UWB_RSV_STATE_T_EXPANDING_PENDING]  = 1,
                [UWB_RSV_STATE_T_EXPANDING_DENIED]   = 1,
        };
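
        /*
         * A status bit of 1 indicates a successfully negotiated (or
         * maintained) reservation; 0 indicates the negotiation is
         * still incomplete or has failed (cf. the DRP IE status bit
         * in ECMA-368).
         */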
        return statuses[rsv->state];
}

/*
 * Return the status bit for a reservation's companion DRP IE.
 */
int uwb_rsv_companion_status(struct uwb_rsv *rsv)
{
        static const int companion_statuses[] = {
                [UWB_RSV_STATE_O_MOVE_EXPANDING]     = 0,
                [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1,
                [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 0,
                [UWB_RSV_STATE_T_EXPANDING_PENDING]  = 0,
                [UWB_RSV_STATE_T_EXPANDING_DENIED]   = 0,
        };

        return companion_statuses[rsv->state];
}

/*
 * Allocate a DRP IE.
 *
 * To save having to free/allocate a DRP IE when its MAS changes,
 * enough memory is allocated for the maximum number of DRP
 * allocation fields.  This gives an overhead per reservation of up to
 * (UWB_NUM_ZONES - 1) * 4 = 60 octets.
 */
static struct uwb_ie_drp *uwb_drp_ie_alloc(void)
{
        struct uwb_ie_drp *drp_ie;

        drp_ie = kzalloc(sizeof(struct uwb_ie_drp) +
                        UWB_NUM_ZONES * sizeof(struct uwb_drp_alloc),
                        GFP_KERNEL);
        if (drp_ie) {
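                /*
                 * kzalloc() zeroed the IE, so hdr.length starts at
                 * 0; it is set by uwb_drp_ie_from_bm() once the
                 * allocation fields are filled in.
                 */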
                drp_ie->hdr.element_id = UWB_IE_DRP;
        }
        return drp_ie;
}


/*
 * Fill a DRP IE's allocation fields from a MAS bitmap.
 */
static void uwb_drp_ie_from_bm(struct uwb_ie_drp *drp_ie,
                               struct uwb_mas_bm *mas)
{
        int z, i, num_fields = 0, next = 0;
        struct uwb_drp_alloc *zones;
        __le16 current_bmp;
        DECLARE_BITMAP(tmp_bmp, UWB_NUM_MAS);
        DECLARE_BITMAP(tmp_mas_bm, UWB_MAS_PER_ZONE);

        zones = drp_ie->allocs;

        bitmap_copy(tmp_bmp, mas->bm, UWB_NUM_MAS);

        /* Determine unique MAS bitmaps in zones from bitmap. */
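        /*
         * Zones with an identical MAS pattern share one allocation
         * field.  For example, if zones 0 and 3 both use MAS bitmap
         * 0x00ff, a single field with zone_bm = 0x0009 and
         * mas_bm = 0x00ff describes both.
         */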
        for (z = 0; z < UWB_NUM_ZONES; z++) {
                bitmap_copy(tmp_mas_bm, tmp_bmp, UWB_MAS_PER_ZONE);
                if (bitmap_weight(tmp_mas_bm, UWB_MAS_PER_ZONE) > 0) {
                        bool found = false;
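                        /*
                         * The low UWB_MAS_PER_ZONE bits of the word
                         * hold this zone's MAS map.  It stays in
                         * host order here and is only converted to
                         * le16 in the final pass below.
                         */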
                        current_bmp = (__le16) *tmp_mas_bm;
                        for (i = 0; i < next; i++) {
                                if (current_bmp == zones[i].mas_bm) {
                                        zones[i].zone_bm |= 1 << z;
                                        found = true;
                                        break;
                                }
                        }
                        if (!found) {
                                num_fields++;
                                zones[next].zone_bm = 1 << z;
                                zones[next].mas_bm = current_bmp;
                                next++;
                        }
                }
                bitmap_shift_right(tmp_bmp, tmp_bmp, UWB_MAS_PER_ZONE, UWB_NUM_MAS);
        }

        /* Store in format ready for transmission (le16). */
        for (i = 0; i < num_fields; i++) {
                drp_ie->allocs[i].zone_bm = cpu_to_le16(zones[i].zone_bm);
                drp_ie->allocs[i].mas_bm = cpu_to_le16(zones[i].mas_bm);
        }

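        /*
         * The IE length excludes the IE header itself: it covers the
         * fixed fields of the DRP IE (DRP Control and DevAddr) plus
         * 4 octets per allocation field.
         */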
        drp_ie->hdr.length = sizeof(struct uwb_ie_drp) - sizeof(struct uwb_ie_hdr)
                + num_fields * sizeof(struct uwb_drp_alloc);
}

/**
 * uwb_drp_ie_update - update a reservation's DRP IE
 * @rsv: the reservation
 */
int uwb_drp_ie_update(struct uwb_rsv *rsv)
{
        struct uwb_ie_drp *drp_ie;
        struct uwb_rsv_move *mv;
        int unsafe;

        if (rsv->state == UWB_RSV_STATE_NONE) {
                kfree(rsv->drp_ie);
                rsv->drp_ie = NULL;
                return 0;
        }

        unsafe = rsv->mas.unsafe ? 1 : 0;

        if (rsv->drp_ie == NULL) {
                rsv->drp_ie = uwb_drp_ie_alloc();
                if (rsv->drp_ie == NULL)
                        return -ENOMEM;
        }
        drp_ie = rsv->drp_ie;

        uwb_ie_drp_set_unsafe(drp_ie, unsafe);
        uwb_ie_drp_set_tiebreaker(drp_ie, rsv->tiebreaker);
        uwb_ie_drp_set_owner(drp_ie, uwb_rsv_is_owner(rsv));
        uwb_ie_drp_set_status(drp_ie, uwb_rsv_status(rsv));
        uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_reason_code(rsv));
        uwb_ie_drp_set_stream_index(drp_ie, rsv->stream);
        uwb_ie_drp_set_type(drp_ie, rsv->type);

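        /*
         * The DevAddr field identifies the other end of the
         * reservation: the target if we are the owner, the owner if
         * we are the target.
         */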
        if (uwb_rsv_is_owner(rsv)) {
                switch (rsv->target.type) {
                case UWB_RSV_TARGET_DEV:
                        drp_ie->dev_addr = rsv->target.dev->dev_addr;
                        break;
                case UWB_RSV_TARGET_DEVADDR:
                        drp_ie->dev_addr = rsv->target.devaddr;
                        break;
                }
        } else
                drp_ie->dev_addr = rsv->owner->dev_addr;

        uwb_drp_ie_from_bm(drp_ie, &rsv->mas);

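        /*
         * A reservation being moved or expanded is advertised with
         * two DRP IEs: the main IE covers the currently reserved
         * MAS, and the companion IE covers the MAS still under
         * negotiation.
         */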
        if (uwb_rsv_has_two_drp_ies(rsv)) {
                mv = &rsv->mv;
                if (mv->companion_drp_ie == NULL) {
                        mv->companion_drp_ie = uwb_drp_ie_alloc();
                        if (mv->companion_drp_ie == NULL)
                                return -ENOMEM;
                }
                drp_ie = mv->companion_drp_ie;

                /* Keep the same configuration as the main DRP IE. */
                memcpy(drp_ie, rsv->drp_ie, sizeof(struct uwb_ie_drp));

                /* FIXME: handle the unsafe bit properly. */
                uwb_ie_drp_set_unsafe(drp_ie, 1);
                uwb_ie_drp_set_status(drp_ie, uwb_rsv_companion_status(rsv));
                uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_companion_reason_code(rsv));

                uwb_drp_ie_from_bm(drp_ie, &mv->companion_mas);
        }

        rsv->ie_valid = true;
        return 0;
}

/*
 * Set MAS bits from given MAS bitmap in a single zone of large bitmap.
 *
 * We are given a zone id and the MAS bitmap of bits that need to be set in
 * this zone.  Note that this zone may already have bits set and this only
 * adds settings - we cannot simply assign the MAS bitmap contents to the
 * zone contents.  We iterate over the bits (MAS) in the zone and set the
 * bits that are set in the given MAS bitmap.
 */
static
void uwb_drp_ie_single_zone_to_bm(struct uwb_mas_bm *bm, u8 zone, u16 mas_bm)
{
        int mas;
        u16 mas_mask;

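        /*
         * A zone occupies UWB_MAS_PER_ZONE consecutive bits of the
         * full bitmap, so the absolute index of a MAS is
         * zone * UWB_MAS_PER_ZONE + mas.  (UWB_NUM_ZONES has the
         * same numeric value, but the stride is the per-zone MAS
         * count.)
         */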
        for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++) {
                mas_mask = 1 << mas;
                if (mas_bm & mas_mask)
                        set_bit(zone * UWB_MAS_PER_ZONE + mas, bm->bm);
        }
}

/**
 * uwb_drp_ie_to_bm - convert DRP allocation fields to a bitmap
 * @bm: MAS bitmap that will be populated to correspond to the
 * allocation fields in the DRP IE
 * @drp_ie: the DRP IE that contains the allocation fields.
 *
 * The input format is an array of MAS allocation fields (16 bit Zone
 * bitmap, 16 bit MAS bitmap) as described in [ECMA-368] section
 * 16.8.6.  The output is a full 256 bit MAS bitmap.
 *
 * We go over all the allocation fields; for each allocation field we
 * know which zones are affected.  We iterate over all the affected
 * zones and call a function that sets the correct MAS bits in each
 * zone.
 */
void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie)
{
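        /*
         * hdr.length covers the 4 fixed octets of the IE (DRP
         * Control and DevAddr) plus 4 octets per allocation field,
         * so stripping the fixed part and dividing by the field size
         * yields the number of allocation fields.
         */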
        int numallocs = (drp_ie->hdr.length - 4) / 4;
        const struct uwb_drp_alloc *alloc;
        int cnt;
        u16 zone_bm, mas_bm;
        u8 zone;
        u16 zone_mask;

        bitmap_zero(bm->bm, UWB_NUM_MAS);

        for (cnt = 0; cnt < numallocs; cnt++) {
                alloc = &drp_ie->allocs[cnt];
                zone_bm = le16_to_cpu(alloc->zone_bm);
                mas_bm = le16_to_cpu(alloc->mas_bm);
                for (zone = 0; zone < UWB_NUM_ZONES; zone++) {
                        zone_mask = 1 << zone;
                        if (zone_bm & zone_mask)
                                uwb_drp_ie_single_zone_to_bm(bm, zone, mas_bm);
                }
        }
}