/*
 * Ultra Wide Band
 * Dynamic Reservation Protocol handling
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/delay.h>
#include "uwb-internal.h"

/* DRP Conflict Actions ([ECMA-368 2nd Edition] 17.4.6) */
enum uwb_drp_conflict_action {
	/* Reservation is maintained, no action needed */
	UWB_DRP_CONFLICT_MANTAIN = 0,

	/* the device shall not transmit frames in conflicting MASs in
	 * the following superframe. If the device is the reservation
	 * target, it shall also set the Reason Code in its DRP IE to
	 * Conflict in its beacon in the following superframe.
	 */
	UWB_DRP_CONFLICT_ACT1,

	/* the device shall not set the Reservation Status bit to ONE
	 * and shall not transmit frames in conflicting MASs. If the
	 * device is the reservation target, it shall also set the
	 * Reason Code in its DRP IE to Conflict.
	 */
	UWB_DRP_CONFLICT_ACT2,

	/* the device shall not transmit frames in conflicting MASs in
	 * the following superframe. It shall remove the conflicting
	 * MASs from the reservation or set the Reservation Status to
	 * ZERO in its beacon in the following superframe. If the
	 * device is the reservation target, it shall also set the
	 * Reason Code in its DRP IE to Conflict.
	 */
	UWB_DRP_CONFLICT_ACT3,
};

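/*
 * Completion callback for the SET-DRP-IE command: log a failure or a
 * timeout, and if another DRP IE update was requested while this
 * command was in flight, queue that update now.
 */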
static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg,
				    struct uwb_rceb *reply, ssize_t reply_size)
{
	struct uwb_rc_evt_set_drp_ie *r = (struct uwb_rc_evt_set_drp_ie *)reply;

	if (r != NULL) {
		if (r->bResultCode != UWB_RC_RES_SUCCESS)
			dev_err(&rc->uwb_dev.dev, "SET-DRP-IE failed: %s (%d)\n",
				uwb_rc_strerror(r->bResultCode), r->bResultCode);
	} else
		dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n");

	spin_lock(&rc->rsvs_lock);
	if (rc->set_drp_ie_pending > 1) {
		rc->set_drp_ie_pending = 0;
		uwb_rsv_queue_update(rc);
	} else {
		rc->set_drp_ie_pending = 0;
	}
	spin_unlock(&rc->rsvs_lock);
}
78
David Vrabel8cc13a02008-09-17 16:34:09 +010079/**
80 * Construct and send the SET DRP IE
81 *
82 * @rc: UWB Host controller
83 * @returns: >= 0 number of bytes still available in the beacon
84 * < 0 errno code on error.
85 *
86 * See WUSB[8.6.2.7]: The host must set all the DRP IEs that it wants the
87 * device to include in its beacon at the same time. We thus have to
88 * traverse all reservations and include the DRP IEs of all PENDING
89 * and NEGOTIATED reservations in a SET DRP command for transmission.
90 *
91 * A DRP Availability IE is appended.
92 *
David Vrabel6fae35f2008-11-17 15:53:42 +000093 * rc->rsvs_mutex is held
David Vrabel8cc13a02008-09-17 16:34:09 +010094 *
95 * FIXME We currently ignore the returned value indicating the remaining space
96 * in beacon. This could be used to deny reservation requests earlier if
97 * determined that they would cause the beacon space to be exceeded.
98 */
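/*
 * The command payload built below is laid out as:
 *
 *   [struct uwb_rc_cmd_set_drp_ie][DRP Availability IE][DRP IE]...
 *
 * where a reservation that is being moved contributes its companion
 * DRP IE immediately after its main DRP IE.
 */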
int uwb_rc_send_all_drp_ie(struct uwb_rc *rc)
{
	int result;
	struct uwb_rc_cmd_set_drp_ie *cmd;
	struct uwb_rsv *rsv;
	struct uwb_rsv_move *mv;
	int num_bytes = 0;
	u8 *IEDataptr;

	result = -ENOMEM;
	/* First traverse all reservations to determine memory needed. */
	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->drp_ie != NULL) {
			num_bytes += rsv->drp_ie->hdr.length + 2;
			if (uwb_rsv_has_two_drp_ies(rsv) &&
			    (rsv->mv.companion_drp_ie != NULL)) {
				mv = &rsv->mv;
				num_bytes += mv->companion_drp_ie->hdr.length + 2;
			}
		}
	}
	num_bytes += sizeof(rc->drp_avail.ie);
	cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL);
	if (cmd == NULL)
		goto error;
	cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
	cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_DRP_IE);
	cmd->wIELength = num_bytes;
	IEDataptr = (u8 *)&cmd->IEData[0];

	/* FIXME: the DRP avail IE is not always needed */
	/* put the DRP avail IE first */
	memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie));
	IEDataptr += sizeof(struct uwb_ie_drp_avail);

	/* Next traverse all reservations to place IEs in allocated memory. */
	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->drp_ie != NULL) {
			memcpy(IEDataptr, rsv->drp_ie,
			       rsv->drp_ie->hdr.length + 2);
			IEDataptr += rsv->drp_ie->hdr.length + 2;

			if (uwb_rsv_has_two_drp_ies(rsv) &&
			    (rsv->mv.companion_drp_ie != NULL)) {
				mv = &rsv->mv;
				memcpy(IEDataptr, mv->companion_drp_ie,
				       mv->companion_drp_ie->hdr.length + 2);
				IEDataptr += mv->companion_drp_ie->hdr.length + 2;
			}
		}
	}

	result = uwb_rc_cmd_async(rc, "SET-DRP-IE", &cmd->rccb,
				  sizeof(*cmd) + num_bytes,
				  UWB_RC_CET_GENERAL, UWB_RC_CMD_SET_DRP_IE,
				  uwb_rc_set_drp_cmd_done, NULL);

	rc->set_drp_ie_pending = 1;

	kfree(cmd);
error:
	return result;
}

/*
 * Evaluate the action to perform using conflict resolution rules
 *
 * Return a uwb_drp_conflict_action.
 */
static int evaluate_conflict_action(struct uwb_ie_drp *ext_drp_ie, int ext_beacon_slot,
				    struct uwb_rsv *rsv, int our_status)
{
	int our_tie_breaker = rsv->tiebreaker;
	int our_type        = rsv->type;
	int our_beacon_slot = rsv->rc->uwb_dev.beacon_slot;

	int ext_tie_breaker = uwb_ie_drp_tiebreaker(ext_drp_ie);
	int ext_status      = uwb_ie_drp_status(ext_drp_ie);
	int ext_type        = uwb_ie_drp_type(ext_drp_ie);

	/* [ECMA-368 2nd Edition] 17.4.6 */
	if (ext_type == UWB_DRP_TYPE_PCA && our_type == UWB_DRP_TYPE_PCA) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-1 */
	if (our_type == UWB_DRP_TYPE_ALIEN_BP) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-2 */
	if (ext_type == UWB_DRP_TYPE_ALIEN_BP) {
		/* here we know our_type != UWB_DRP_TYPE_ALIEN_BP */
		return UWB_DRP_CONFLICT_ACT1;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-3 */
	if (our_status == 0 && ext_status == 1) {
		return UWB_DRP_CONFLICT_ACT2;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-4 */
	if (our_status == 1 && ext_status == 0) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-5a */
	if (our_tie_breaker == ext_tie_breaker &&
	    our_beacon_slot < ext_beacon_slot) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-5b */
	if (our_tie_breaker != ext_tie_breaker &&
	    our_beacon_slot > ext_beacon_slot) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	if (our_status == 0) {
		if (our_tie_breaker == ext_tie_breaker) {
			/* [ECMA-368 2nd Edition] 17.4.6-6a */
			if (our_beacon_slot > ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT2;
			}
		} else {
			/* [ECMA-368 2nd Edition] 17.4.6-6b */
			if (our_beacon_slot < ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT2;
			}
		}
	} else {
		if (our_tie_breaker == ext_tie_breaker) {
			/* [ECMA-368 2nd Edition] 17.4.6-7a */
			if (our_beacon_slot > ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT3;
			}
		} else {
			/* [ECMA-368 2nd Edition] 17.4.6-7b */
			if (our_beacon_slot < ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT3;
			}
		}
	}
	return UWB_DRP_CONFLICT_MANTAIN;
}

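/*
 * Handle a conflict with a normal (non-expanding) reservation: pick a
 * conflict action per [ECMA-368 2nd Edition] 17.4.6 and, depending on
 * whether we own the reservation, try to move it, shrink it, or mark
 * it as conflicting.
 */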
static void handle_conflict_normal(struct uwb_ie_drp *drp_ie,
				   int ext_beacon_slot,
				   struct uwb_rsv *rsv,
				   struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_rsv_move *mv = &rsv->mv;
	struct uwb_drp_backoff_win *bow = &rc->bow;
	int action;

	action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv,
					  uwb_rsv_status(rsv));

	if (uwb_rsv_is_owner(rsv)) {
		switch (action) {
		case UWB_DRP_CONFLICT_ACT2:
			/* try to move the reservation */
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_TO_BE_MOVED);
			if (bow->can_reserve_extra_mases == false)
				uwb_rsv_backoff_win_increment(rc);
			break;
		case UWB_DRP_CONFLICT_ACT3:
			uwb_rsv_backoff_win_increment(rc);
			/* drop some MASs with reason code "modified":
			 * put the MASs to be dropped in the companion */
			bitmap_and(mv->companion_mas.bm, rsv->mas.bm,
				   conflicting_mas->bm, UWB_NUM_MAS);
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
			break;
		default:
			break;
		}
	} else {
		switch (action) {
		case UWB_DRP_CONFLICT_ACT2:
		case UWB_DRP_CONFLICT_ACT3:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
			break;
		default:
			break;
		}
	}
}

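/*
 * Handle a conflict with a reservation that carries a companion DRP
 * IE (it is being moved or expanded).  If only the companion part
 * conflicts, the expansion can be abandoned; if the base part also
 * conflicts, the reservation itself is modified or marked as
 * conflicting.
 */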
static void handle_conflict_expanding(struct uwb_ie_drp *drp_ie, int ext_beacon_slot,
				      struct uwb_rsv *rsv, bool companion_only,
				      struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_drp_backoff_win *bow = &rc->bow;
	struct uwb_rsv_move *mv = &rsv->mv;
	int action;

	if (companion_only) {
		/* status of the companion is 0 at this point */
		action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, 0);
		if (uwb_rsv_is_owner(rsv)) {
			switch (action) {
			case UWB_DRP_CONFLICT_ACT2:
			case UWB_DRP_CONFLICT_ACT3:
				uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
				rsv->needs_release_companion_mas = false;
				if (bow->can_reserve_extra_mases == false)
					uwb_rsv_backoff_win_increment(rc);
				uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);
				break;
			}
		} else { /* rsv is target */
			switch (action) {
			case UWB_DRP_CONFLICT_ACT2:
			case UWB_DRP_CONFLICT_ACT3:
				uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_CONFLICT);
				/* send_drp_avail_ie = true; */
				break;
			}
		}
	} else { /* the base part of the reservation is also conflicting */
		if (uwb_rsv_is_owner(rsv)) {
			uwb_rsv_backoff_win_increment(rc);
			/* remove the companion part */
			uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);

			/* drop some MASs with reason code "modified":
			 * put the MASs to be dropped in the companion */
			bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm,
				      conflicting_mas->bm, UWB_NUM_MAS);
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
		} else { /* it is a target rsv */
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
			/* send_drp_avail_ie = true; */
		}
	}
}

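/*
 * Resolve a conflict between an external DRP IE and one of our
 * reservations, distinguishing reservations that carry a companion
 * DRP IE from normal ones.
 */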
static void uwb_drp_handle_conflict_rsv(struct uwb_rc *rc, struct uwb_rsv *rsv,
					struct uwb_rc_evt_drp *drp_evt,
					struct uwb_ie_drp *drp_ie,
					struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rsv_move *mv;

	/* check if the conflicting reservation has two DRP IEs */
	if (uwb_rsv_has_two_drp_ies(rsv)) {
		mv = &rsv->mv;
		if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) {
			handle_conflict_expanding(drp_ie, drp_evt->beacon_slot_number,
						  rsv, false, conflicting_mas);
		} else {
			if (bitmap_intersects(mv->companion_mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) {
				handle_conflict_expanding(drp_ie, drp_evt->beacon_slot_number,
							  rsv, true, conflicting_mas);
			}
		}
	} else if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) {
		handle_conflict_normal(drp_ie, drp_evt->beacon_slot_number, rsv, conflicting_mas);
	}
}

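/*
 * Check every local reservation against the conflicting MAS bitmap
 * and resolve each conflict found.
 */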
static void uwb_drp_handle_all_conflict_rsv(struct uwb_rc *rc,
					    struct uwb_rc_evt_drp *drp_evt,
					    struct uwb_ie_drp *drp_ie,
					    struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rsv *rsv;

	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, conflicting_mas);
	}
}

/*
 * Based on the DRP IE, transition a target reservation to a new
 * state.
 */
static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv,
				   struct uwb_ie_drp *drp_ie, struct uwb_rc_evt_drp *drp_evt)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rsv_move *mv = &rsv->mv;
	int status;
	enum uwb_drp_reason reason_code;
	struct uwb_mas_bm mas;

	status = uwb_ie_drp_status(drp_ie);
	reason_code = uwb_ie_drp_reason_code(drp_ie);
	uwb_drp_ie_to_bm(&mas, drp_ie);

	switch (reason_code) {
	case UWB_DRP_REASON_ACCEPTED:
		if (rsv->state == UWB_RSV_STATE_T_CONFLICT) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
			break;
		}

		if (rsv->state == UWB_RSV_STATE_T_EXPANDING_ACCEPTED) {
			/* drp_ie is a companion IE */
			if (!bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS))
				/* stroke the companion */
				uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
		} else {
			if (!bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) {
				if (uwb_drp_avail_reserve_pending(rc, &mas) == -EBUSY) {
					/* FIXME: there is a conflict, find
					 * the conflicting reservations and
					 * take a sensible action.  Consider
					 * that in drp_ie there is the
					 * "neighbour" */
					uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas);
				} else {
					/* accept the extra reservation */
					bitmap_copy(mv->companion_mas.bm, mas.bm, UWB_NUM_MAS);
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
				}
			} else {
				if (status) {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
				}
			}
		}
		break;

	case UWB_DRP_REASON_MODIFIED:
		/* check to see if we have already modified the reservation */
		if (bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
			break;
		}

		/* find out whether the owner wants to expand or reduce */
		if (bitmap_subset(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) {
			/* owner is reducing */
			bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mas.bm, UWB_NUM_MAS);
			uwb_drp_avail_release(rsv->rc, &mv->companion_mas);
		}

		bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS);
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_RESIZED);
		break;
	default:
		dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
			 reason_code, status);
	}
}

/*
 * Based on the DRP IE, transition an owner reservation to a new
 * state.
 */
static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv,
				  struct uwb_dev *src, struct uwb_ie_drp *drp_ie,
				  struct uwb_rc_evt_drp *drp_evt)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rsv_move *mv = &rsv->mv;
	int status;
	enum uwb_drp_reason reason_code;
	struct uwb_mas_bm mas;

	status = uwb_ie_drp_status(drp_ie);
	reason_code = uwb_ie_drp_reason_code(drp_ie);
	uwb_drp_ie_to_bm(&mas, drp_ie);

	if (status) {
		switch (reason_code) {
		case UWB_DRP_REASON_ACCEPTED:
			switch (rsv->state) {
			case UWB_RSV_STATE_O_PENDING:
			case UWB_RSV_STATE_O_INITIATED:
			case UWB_RSV_STATE_O_ESTABLISHED:
				uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
				break;
			case UWB_RSV_STATE_O_MODIFIED:
				if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
				} else {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
				}
				break;

			case UWB_RSV_STATE_O_MOVE_REDUCING: /* shouldn't be a problem */
				if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
				} else {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
				}
				break;
			case UWB_RSV_STATE_O_MOVE_EXPANDING:
				if (bitmap_equal(mas.bm, mv->companion_mas.bm, UWB_NUM_MAS)) {
					/* Companion reservation accepted */
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
				} else {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
				}
				break;
			case UWB_RSV_STATE_O_MOVE_COMBINING:
				if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS))
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
				else
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
				break;
			default:
				break;
			}
			break;
		default:
			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
				 reason_code, status);
		}
	} else {
		switch (reason_code) {
		case UWB_DRP_REASON_PENDING:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_PENDING);
			break;
		case UWB_DRP_REASON_DENIED:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
			break;
		case UWB_DRP_REASON_CONFLICT:
			/* resolve the conflict */
			bitmap_complement(mas.bm, src->last_availability_bm,
					  UWB_NUM_MAS);
			uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, &mas);
			break;
		default:
			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
				 reason_code, status);
		}
	}
}

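/*
 * (Re)arm the alien BP conflict timer: the conflict entry expires if
 * the alien BP DRP IE is not seen again within mMaxLostBeacons
 * superframes.
 */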
static void uwb_cnflt_alien_stroke_timer(struct uwb_cnflt_alien *cnflt)
{
	unsigned timeout_us = UWB_MAX_LOST_BEACONS * UWB_SUPERFRAME_LENGTH_US;

	mod_timer(&cnflt->timer, jiffies + usecs_to_jiffies(timeout_us));
}

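/*
 * Work item run when an alien BP conflict entry expires: remove the
 * entry, rebuild the radio controller's global conflicting alien
 * bitmap from the remaining entries and schedule a reservation
 * update.
 */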
static void uwb_cnflt_update_work(struct work_struct *work)
{
	struct uwb_cnflt_alien *cnflt = container_of(work,
						     struct uwb_cnflt_alien,
						     cnflt_update_work);
	struct uwb_cnflt_alien *c;
	struct uwb_rc *rc = cnflt->rc;

	unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;

	mutex_lock(&rc->rsvs_mutex);

	list_del(&cnflt->rc_node);

	/* update the rc global conflicting alien bitmap */
	bitmap_zero(rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS);

	list_for_each_entry(c, &rc->cnflt_alien_list, rc_node) {
		bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm,
			  c->mas.bm, UWB_NUM_MAS);
	}

	queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work,
			   usecs_to_jiffies(delay_us));

	kfree(cnflt);
	mutex_unlock(&rc->rsvs_mutex);
}

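/*
 * Timer callback for an alien BP conflict entry: defer the actual
 * cleanup to the reservation workqueue.
 */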
static void uwb_cnflt_timer(unsigned long arg)
{
	struct uwb_cnflt_alien *cnflt = (struct uwb_cnflt_alien *)arg;

	queue_work(cnflt->rc->rsv_workq, &cnflt->cnflt_update_work);
}

/*
 * We have received a DRP IE of type Alien BP and we need to make
 * sure we do not transmit in conflicting MASs.
 */
static void uwb_drp_handle_alien_drp(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_mas_bm mas;
	struct uwb_cnflt_alien *cnflt;
	char buf[72];
	unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;

	uwb_drp_ie_to_bm(&mas, drp_ie);
	bitmap_scnprintf(buf, sizeof(buf), mas.bm, UWB_NUM_MAS);

	list_for_each_entry(cnflt, &rc->cnflt_alien_list, rc_node) {
		if (bitmap_equal(cnflt->mas.bm, mas.bm, UWB_NUM_MAS)) {
			/* An existing alien BP entry has the same
			 * conflicting bitmap; just restart its timer. */
			uwb_cnflt_alien_stroke_timer(cnflt);
			return;
		}
	}

	/* New alien BP reservation conflicting bitmap */

	/* alloc and initialize a new uwb_cnflt_alien */
	cnflt = kzalloc(sizeof(struct uwb_cnflt_alien), GFP_KERNEL);
	if (!cnflt) {
		dev_err(dev, "failed to alloc uwb_cnflt_alien struct\n");
		return;
	}
	INIT_LIST_HEAD(&cnflt->rc_node);
	init_timer(&cnflt->timer);
	cnflt->timer.function = uwb_cnflt_timer;
	cnflt->timer.data = (unsigned long)cnflt;

	cnflt->rc = rc;
	INIT_WORK(&cnflt->cnflt_update_work, uwb_cnflt_update_work);

	bitmap_copy(cnflt->mas.bm, mas.bm, UWB_NUM_MAS);

	list_add_tail(&cnflt->rc_node, &rc->cnflt_alien_list);

	/* update the rc global conflicting alien bitmap */
	bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, mas.bm, UWB_NUM_MAS);

	queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work,
			   usecs_to_jiffies(delay_us));

	/* start the timer */
	uwb_cnflt_alien_stroke_timer(cnflt);
}

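/*
 * A DRP IE that does not involve us directly may still conflict with
 * our reservations; check them all against the MASs it claims.
 */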
static void uwb_drp_process_not_involved(struct uwb_rc *rc,
					 struct uwb_rc_evt_drp *drp_evt,
					 struct uwb_ie_drp *drp_ie)
{
	struct uwb_mas_bm mas;

	uwb_drp_ie_to_bm(&mas, drp_ie);
	uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas);
}

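/*
 * A DRP IE that names us as owner or target: find the matching
 * reservation and process the IE from the appropriate side.
 */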
static void uwb_drp_process_involved(struct uwb_rc *rc, struct uwb_dev *src,
				     struct uwb_rc_evt_drp *drp_evt,
				     struct uwb_ie_drp *drp_ie)
{
	struct uwb_rsv *rsv;

	rsv = uwb_rsv_find(rc, src, drp_ie);
	if (!rsv) {
		/*
		 * No reservation? It's either for a recently
		 * terminated reservation; or the DRP IE couldn't be
		 * processed (e.g., an invalid IE or out of memory).
		 */
		return;
	}

	/*
	 * Do nothing with DRP IEs for reservations that have been
	 * terminated.
	 */
	if (rsv->state == UWB_RSV_STATE_NONE) {
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
		return;
	}

	if (uwb_ie_drp_owner(drp_ie))
		uwb_drp_process_target(rc, rsv, drp_ie, drp_evt);
	else
		uwb_drp_process_owner(rc, rsv, src, drp_ie, drp_evt);
}

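/*
 * A DRP IE involves us if its DevAddr field matches the radio
 * controller's device address.
 */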
static bool uwb_drp_involves_us(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie)
{
	return uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, &drp_ie->dev_addr) == 0;
}

/*
 * Process a received DRP IE.
 */
static void uwb_drp_process(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
			    struct uwb_dev *src, struct uwb_ie_drp *drp_ie)
{
	if (uwb_ie_drp_type(drp_ie) == UWB_DRP_TYPE_ALIEN_BP)
		uwb_drp_handle_alien_drp(rc, drp_ie);
	else if (uwb_drp_involves_us(rc, drp_ie))
		uwb_drp_process_involved(rc, src, drp_evt, drp_ie);
	else
		uwb_drp_process_not_involved(rc, drp_evt, drp_ie);
}

/*
 * Process a received DRP Availability IE.
 */
static void uwb_drp_availability_process(struct uwb_rc *rc, struct uwb_dev *src,
					 struct uwb_ie_drp_avail *drp_availability_ie)
{
	bitmap_copy(src->last_availability_bm,
		    drp_availability_ie->bmp, UWB_NUM_MAS);
}

/*
 * Process all the DRP IEs (both DRP IEs and the DRP Availability IE)
 * from a device.
 */
static
void uwb_drp_process_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
			 size_t ielen, struct uwb_dev *src_dev)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_ie_hdr *ie_hdr;
	void *ptr;

	ptr = drp_evt->ie_data;
	for (;;) {
		ie_hdr = uwb_ie_next(&ptr, &ielen);
		if (!ie_hdr)
			break;

		switch (ie_hdr->element_id) {
		case UWB_IE_DRP_AVAILABILITY:
			uwb_drp_availability_process(rc, src_dev,
					(struct uwb_ie_drp_avail *)ie_hdr);
			break;
		case UWB_IE_DRP:
			uwb_drp_process(rc, drp_evt, src_dev,
					(struct uwb_ie_drp *)ie_hdr);
			break;
		default:
			dev_warn(dev, "unexpected IE in DRP notification\n");
			break;
		}
	}

	if (ielen > 0)
		dev_warn(dev, "%d octets remaining in DRP notification\n",
			 (int)ielen);
}

/**
 * uwbd_evt_handle_rc_drp - handle a DRP_IE event
 * @evt: the DRP_IE event from the radio controller
 *
 * This processes DRP notifications from the radio controller, either
 * initiating a new reservation or transitioning an existing
 * reservation into a different state.
 *
 * DRP notifications can occur for three different reasons:
 *
 * - UWB_DRP_NOTIF_DRP_IE_RECVD: one or more DRP IEs with the RC as
 *   the target or source have been received.
 *
 *   These DRP IEs could be new or for an existing reservation.
 *
 *   If the DRP IE for an existing reservation ceases to be received
 *   for at least mMaxLostBeacons, the reservation should be
 *   considered to be terminated.  Note that the TERMINATE reason (see
 *   below) may not always be signalled (e.g., the remote device has
 *   two or more reservations established with the RC).
 *
 * - UWB_DRP_NOTIF_CONFLICT: DRP IEs from any device in the beacon
 *   group conflict with the RC's reservations.
 *
 * - UWB_DRP_NOTIF_TERMINATE: DRP IEs are no longer being received
 *   from a device (i.e., it has terminated all reservations).
 *
 * Only the software state of the reservations is changed; the setting
 * of the radio controller's DRP IEs is done after all the events in
 * an event buffer are processed.  This saves waiting multiple times
 * for the SET_DRP_IE command to complete.
 */
int uwbd_evt_handle_rc_drp(struct uwb_event *evt)
{
	struct device *dev = &evt->rc->uwb_dev.dev;
	struct uwb_rc *rc = evt->rc;
	struct uwb_rc_evt_drp *drp_evt;
	size_t ielength, bytes_left;
	struct uwb_dev_addr src_addr;
	struct uwb_dev *src_dev;

	/* Is there enough data to decode the event (and any IEs in
	   its payload)? */
	if (evt->notif.size < sizeof(*drp_evt)) {
		dev_err(dev, "DRP event: Not enough data to decode event "
			"[%zu bytes left, %zu needed]\n",
			evt->notif.size, sizeof(*drp_evt));
		return 0;
	}
	bytes_left = evt->notif.size - sizeof(*drp_evt);
	drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp, rceb);
	ielength = le16_to_cpu(drp_evt->ie_length);
	if (bytes_left != ielength) {
		dev_err(dev, "DRP event: Not enough data in payload [%zu "
			"bytes left, %zu declared in the event]\n",
			bytes_left, ielength);
		return 0;
	}

	memcpy(src_addr.data, &drp_evt->src_addr, sizeof(src_addr));
	src_dev = uwb_dev_get_by_devaddr(rc, &src_addr);
	if (!src_dev) {
		/*
		 * A DRP notification from an unrecognized device.
		 *
		 * This is probably from a WUSB device that doesn't
		 * have an EUI-48 and therefore doesn't show up in the
		 * UWB device database.  It's safe to simply ignore
		 * these.
		 */
		return 0;
	}

	mutex_lock(&rc->rsvs_mutex);

	/* We do not distinguish between the different notification reasons. */
	uwb_drp_process_all(rc, drp_evt, ielength, src_dev);

	mutex_unlock(&rc->rsvs_mutex);

	uwb_dev_put(src_dev);
	return 0;
}