/*
 * Ultra Wide Band
 * Dynamic Reservation Protocol handling
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include "uwb-internal.h"


/* DRP Conflict Actions ([ECMA-368 2nd Edition] 17.4.6) */
enum uwb_drp_conflict_action {
	/* Reservation is maintained, no action needed */
	UWB_DRP_CONFLICT_MANTAIN = 0,

	/* the device shall not transmit frames in conflicting MASs in
	 * the following superframe. If the device is the reservation
	 * target, it shall also set the Reason Code in its DRP IE to
	 * Conflict in its beacon in the following superframe.
	 */
	UWB_DRP_CONFLICT_ACT1,

	/* the device shall not set the Reservation Status bit to ONE
	 * and shall not transmit frames in conflicting MASs. If the
	 * device is the reservation target, it shall also set the
	 * Reason Code in its DRP IE to Conflict.
	 */
	UWB_DRP_CONFLICT_ACT2,

	/* the device shall not transmit frames in conflicting MASs in
	 * the following superframe. It shall remove the conflicting
	 * MASs from the reservation or set the Reservation Status to
	 * ZERO in its beacon in the following superframe. If the
	 * device is the reservation target, it shall also set the
	 * Reason Code in its DRP IE to Conflict.
	 */
	UWB_DRP_CONFLICT_ACT3,
};


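/*
 * Completion callback for the asynchronous SET-DRP-IE command: log any
 * failure reported by the radio controller and, if another update was
 * requested while the command was in flight, queue a fresh one.
 */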
static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg,
				    struct uwb_rceb *reply, ssize_t reply_size)
{
	struct uwb_rc_evt_set_drp_ie *r = (struct uwb_rc_evt_set_drp_ie *)reply;
	unsigned long flags;

	if (r != NULL) {
		if (r->bResultCode != UWB_RC_RES_SUCCESS)
			dev_err(&rc->uwb_dev.dev, "SET-DRP-IE failed: %s (%d)\n",
				uwb_rc_strerror(r->bResultCode), r->bResultCode);
	} else
		dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n");

	spin_lock_irqsave(&rc->rsvs_lock, flags);
	if (rc->set_drp_ie_pending > 1) {
		rc->set_drp_ie_pending = 0;
		uwb_rsv_queue_update(rc);
	} else {
		rc->set_drp_ie_pending = 0;
	}
	spin_unlock_irqrestore(&rc->rsvs_lock, flags);
}

/**
 * Construct and send the SET DRP IE
 *
 * @rc:       UWB Host controller
 * @returns:  >= 0 number of bytes still available in the beacon
 *            < 0 errno code on error.
 *
 * See WUSB[8.6.2.7]: The host must set all the DRP IEs that it wants the
 * device to include in its beacon at the same time. We thus have to
 * traverse all reservations and include the DRP IEs of all PENDING
 * and NEGOTIATED reservations in a SET DRP command for transmission.
 *
 * A DRP Availability IE is appended.
 *
 * rc->rsvs_mutex is held
 *
 * FIXME We currently ignore the returned value indicating the remaining space
 * in the beacon. This could be used to deny reservation requests earlier if
 * it is determined that they would cause the beacon space to be exceeded.
 */
int uwb_rc_send_all_drp_ie(struct uwb_rc *rc)
{
	int result;
	struct uwb_rc_cmd_set_drp_ie *cmd;
	struct uwb_rsv *rsv;
	struct uwb_rsv_move *mv;
	int num_bytes = 0;
	u8 *IEDataptr;

	result = -ENOMEM;
	/* First traverse all reservations to determine memory needed. */
	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->drp_ie != NULL) {
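			/* Each IE takes its payload length plus 2 octets
			 * of IE header (Element ID and Length). */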
			num_bytes += rsv->drp_ie->hdr.length + 2;
			if (uwb_rsv_has_two_drp_ies(rsv) &&
			    (rsv->mv.companion_drp_ie != NULL)) {
				mv = &rsv->mv;
				num_bytes += mv->companion_drp_ie->hdr.length + 2;
			}
		}
	}
	num_bytes += sizeof(rc->drp_avail.ie);
	cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL);
	if (cmd == NULL)
		goto error;
	cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
	cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_DRP_IE);
	cmd->wIELength = num_bytes;
	IEDataptr = (u8 *)&cmd->IEData[0];

	/* FIXME: the DRP Availability IE is not always needed */
	/* put the DRP Availability IE first */
	memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie));
	IEDataptr += sizeof(struct uwb_ie_drp_avail);

	/* Next traverse all reservations to place IEs in allocated memory. */
	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->drp_ie != NULL) {
			memcpy(IEDataptr, rsv->drp_ie,
			       rsv->drp_ie->hdr.length + 2);
			IEDataptr += rsv->drp_ie->hdr.length + 2;

			if (uwb_rsv_has_two_drp_ies(rsv) &&
			    (rsv->mv.companion_drp_ie != NULL)) {
				mv = &rsv->mv;
				memcpy(IEDataptr, mv->companion_drp_ie,
				       mv->companion_drp_ie->hdr.length + 2);
				IEDataptr += mv->companion_drp_ie->hdr.length + 2;
			}
		}
	}

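	/* The command completes asynchronously; uwb_rc_set_drp_cmd_done()
	 * runs when the radio controller replies or the command times out. */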
	result = uwb_rc_cmd_async(rc, "SET-DRP-IE", &cmd->rccb, sizeof(*cmd) + num_bytes,
				  UWB_RC_CET_GENERAL, UWB_RC_CMD_SET_DRP_IE,
				  uwb_rc_set_drp_cmd_done, NULL);

	rc->set_drp_ie_pending = 1;

	kfree(cmd);
error:
	return result;
}

/*
 * Evaluate the action to perform using conflict resolution rules
 *
 * Return a uwb_drp_conflict_action.
 */
static int evaluate_conflict_action(struct uwb_ie_drp *ext_drp_ie, int ext_beacon_slot,
				    struct uwb_rsv *rsv, int our_status)
{
	int our_tie_breaker = rsv->tiebreaker;
	int our_type        = rsv->type;
	int our_beacon_slot = rsv->rc->uwb_dev.beacon_slot;

	int ext_tie_breaker = uwb_ie_drp_tiebreaker(ext_drp_ie);
	int ext_status      = uwb_ie_drp_status(ext_drp_ie);
	int ext_type        = uwb_ie_drp_type(ext_drp_ie);


	/* [ECMA-368 2nd Edition] 17.4.6 */
	if (ext_type == UWB_DRP_TYPE_PCA && our_type == UWB_DRP_TYPE_PCA) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-1 */
	if (our_type == UWB_DRP_TYPE_ALIEN_BP) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-2 */
	if (ext_type == UWB_DRP_TYPE_ALIEN_BP) {
		/* here we know our_type != UWB_DRP_TYPE_ALIEN_BP */
		return UWB_DRP_CONFLICT_ACT1;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-3 */
	if (our_status == 0 && ext_status == 1) {
		return UWB_DRP_CONFLICT_ACT2;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-4 */
	if (our_status == 1 && ext_status == 0) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-5a */
	if (our_tie_breaker == ext_tie_breaker &&
	    our_beacon_slot < ext_beacon_slot) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-5b */
	if (our_tie_breaker != ext_tie_breaker &&
	    our_beacon_slot > ext_beacon_slot) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	if (our_status == 0) {
		if (our_tie_breaker == ext_tie_breaker) {
			/* [ECMA-368 2nd Edition] 17.4.6-6a */
			if (our_beacon_slot > ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT2;
			}
		} else {
			/* [ECMA-368 2nd Edition] 17.4.6-6b */
			if (our_beacon_slot < ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT2;
			}
		}
	} else {
		if (our_tie_breaker == ext_tie_breaker) {
			/* [ECMA-368 2nd Edition] 17.4.6-7a */
			if (our_beacon_slot > ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT3;
			}
		} else {
			/* [ECMA-368 2nd Edition] 17.4.6-7b */
			if (our_beacon_slot < ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT3;
			}
		}
	}
	return UWB_DRP_CONFLICT_MANTAIN;
}

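/*
 * Apply the conflict resolution outcome to a reservation that is not
 * being moved or expanded: as owner we either try to move it or drop
 * the conflicting MASs; as target we only flag the conflict.
 */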
static void handle_conflict_normal(struct uwb_ie_drp *drp_ie,
				   int ext_beacon_slot,
				   struct uwb_rsv *rsv,
				   struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_rsv_move *mv = &rsv->mv;
	struct uwb_drp_backoff_win *bow = &rc->bow;
	int action;

	action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, uwb_rsv_status(rsv));

	if (uwb_rsv_is_owner(rsv)) {
		switch(action) {
		case UWB_DRP_CONFLICT_ACT2:
			/* try move */
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_TO_BE_MOVED);
			if (bow->can_reserve_extra_mases == false)
				uwb_rsv_backoff_win_increment(rc);

			break;
		case UWB_DRP_CONFLICT_ACT3:
			uwb_rsv_backoff_win_increment(rc);
			/* drop some MASs with reason code modified */
			/* put the MASs to be dropped in the companion bitmap */
			bitmap_and(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS);
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
		default:
			break;
		}
	} else {
		switch(action) {
		case UWB_DRP_CONFLICT_ACT2:
		case UWB_DRP_CONFLICT_ACT3:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
		default:
			break;
		}

	}

}


static void handle_conflict_expanding(struct uwb_ie_drp *drp_ie, int ext_beacon_slot,
				      struct uwb_rsv *rsv, bool companion_only,
				      struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_drp_backoff_win *bow = &rc->bow;
	struct uwb_rsv_move *mv = &rsv->mv;
	int action;

	if (companion_only) {
		/* status of companion is 0 at this point */
		action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, 0);
		if (uwb_rsv_is_owner(rsv)) {
			switch(action) {
			case UWB_DRP_CONFLICT_ACT2:
			case UWB_DRP_CONFLICT_ACT3:
				uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
				rsv->needs_release_companion_mas = false;
				if (bow->can_reserve_extra_mases == false)
					uwb_rsv_backoff_win_increment(rc);
				uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);
			}
		} else { /* rsv is target */
			switch(action) {
			case UWB_DRP_CONFLICT_ACT2:
			case UWB_DRP_CONFLICT_ACT3:
				uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_CONFLICT);
				/* send_drp_avail_ie = true; */
			}
		}
	} else { /* the base part of the reservation is also conflicting */
		if (uwb_rsv_is_owner(rsv)) {
			uwb_rsv_backoff_win_increment(rc);
			/* remove companion part */
			uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);

			/* drop some MASs with reason code modified */

			/* put the MASs to be dropped in the companion bitmap */
			bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS);
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
		} else { /* it is a target rsv */
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
			/* send_drp_avail_ie = true; */
		}
	}
}

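/*
 * Resolve a conflict between an external DRP IE and a single local
 * reservation, treating the base MAS set and the companion (move or
 * expand) MAS set separately when the reservation carries two DRP IEs.
 */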
static void uwb_drp_handle_conflict_rsv(struct uwb_rc *rc, struct uwb_rsv *rsv,
					struct uwb_rc_evt_drp *drp_evt,
					struct uwb_ie_drp *drp_ie,
					struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rsv_move *mv;

	/* check if the conflicting reservation has two drp_ies */
	if (uwb_rsv_has_two_drp_ies(rsv)) {
		mv = &rsv->mv;
		if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) {
			handle_conflict_expanding(drp_ie, drp_evt->beacon_slot_number,
						  rsv, false, conflicting_mas);
		} else {
			if (bitmap_intersects(mv->companion_mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) {
				handle_conflict_expanding(drp_ie, drp_evt->beacon_slot_number,
							  rsv, true, conflicting_mas);
			}
		}
	} else if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) {
		handle_conflict_normal(drp_ie, drp_evt->beacon_slot_number, rsv, conflicting_mas);
	}
}

static void uwb_drp_handle_all_conflict_rsv(struct uwb_rc *rc,
					    struct uwb_rc_evt_drp *drp_evt,
					    struct uwb_ie_drp *drp_ie,
					    struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rsv *rsv;

	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, conflicting_mas);
	}
}

/*
 * Based on the DRP IE, transition a target reservation to a new
 * state.
 */
static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv,
				   struct uwb_ie_drp *drp_ie, struct uwb_rc_evt_drp *drp_evt)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rsv_move *mv = &rsv->mv;
	int status;
	enum uwb_drp_reason reason_code;
	struct uwb_mas_bm mas;

	status = uwb_ie_drp_status(drp_ie);
	reason_code = uwb_ie_drp_reason_code(drp_ie);
	uwb_drp_ie_to_bm(&mas, drp_ie);

	switch (reason_code) {
	case UWB_DRP_REASON_ACCEPTED:

		if (rsv->state == UWB_RSV_STATE_T_CONFLICT) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
			break;
		}

		if (rsv->state == UWB_RSV_STATE_T_EXPANDING_ACCEPTED) {
			/* drp_ie is companion */
			if (!bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS))
				/* stroke companion */
				uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
		} else {
			if (!bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) {
				if (uwb_drp_avail_reserve_pending(rc, &mas) == -EBUSY) {
					/* FIXME: there is a conflict, find
					 * the conflicting reservations and
					 * take a sensible action. Consider
					 * that in drp_ie there is the
					 * "neighbour" */
					uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas);
				} else {
					/* accept the extra reservation */
					bitmap_copy(mv->companion_mas.bm, mas.bm, UWB_NUM_MAS);
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
				}
			} else {
				if (status) {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
				}
			}

		}
		break;

	case UWB_DRP_REASON_MODIFIED:
		/* check to see if we have already modified the reservation */
		if (bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
			break;
		}

		/* find out whether the owner wants to expand or reduce */
		if (bitmap_subset(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) {
			/* owner is reducing */
			bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mas.bm, UWB_NUM_MAS);
			uwb_drp_avail_release(rsv->rc, &mv->companion_mas);
		}

		bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS);
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_RESIZED);
		break;
	default:
		dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
			 reason_code, status);
	}
}

/*
 * Based on the DRP IE, transition an owner reservation to a new
 * state.
 */
static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv,
				  struct uwb_dev *src, struct uwb_ie_drp *drp_ie,
				  struct uwb_rc_evt_drp *drp_evt)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rsv_move *mv = &rsv->mv;
	int status;
	enum uwb_drp_reason reason_code;
	struct uwb_mas_bm mas;

	status = uwb_ie_drp_status(drp_ie);
	reason_code = uwb_ie_drp_reason_code(drp_ie);
	uwb_drp_ie_to_bm(&mas, drp_ie);

	if (status) {
		switch (reason_code) {
		case UWB_DRP_REASON_ACCEPTED:
			switch (rsv->state) {
			case UWB_RSV_STATE_O_PENDING:
			case UWB_RSV_STATE_O_INITIATED:
			case UWB_RSV_STATE_O_ESTABLISHED:
				uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
				break;
			case UWB_RSV_STATE_O_MODIFIED:
				if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
				} else {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
				}
				break;

			case UWB_RSV_STATE_O_MOVE_REDUCING: /* shouldn't be a problem */
				if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
				} else {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
				}
				break;
			case UWB_RSV_STATE_O_MOVE_EXPANDING:
				if (bitmap_equal(mas.bm, mv->companion_mas.bm, UWB_NUM_MAS)) {
					/* Companion reservation accepted */
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
				} else {
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
				}
				break;
			case UWB_RSV_STATE_O_MOVE_COMBINING:
				if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS))
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
				else
					uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
				break;
			default:
				break;
			}
			break;
		default:
			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
				 reason_code, status);
		}
	} else {
		switch (reason_code) {
		case UWB_DRP_REASON_PENDING:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_PENDING);
			break;
		case UWB_DRP_REASON_DENIED:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
			break;
		case UWB_DRP_REASON_CONFLICT:
			/* resolve the conflict */
			bitmap_complement(mas.bm, src->last_availability_bm,
					  UWB_NUM_MAS);
			uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, &mas);
			break;
		default:
			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
				 reason_code, status);
		}
	}
}

static void uwb_cnflt_alien_stroke_timer(struct uwb_cnflt_alien *cnflt)
{
	unsigned timeout_us = UWB_MAX_LOST_BEACONS * UWB_SUPERFRAME_LENGTH_US;
	mod_timer(&cnflt->timer, jiffies + usecs_to_jiffies(timeout_us));
}

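/*
 * An alien BP conflict entry has expired (its DRP IE has not been seen
 * for UWB_MAX_LOST_BEACONS superframes): drop it from the list, rebuild
 * the global conflicting-MAS bitmap and schedule a reservation update.
 */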
static void uwb_cnflt_update_work(struct work_struct *work)
{
	struct uwb_cnflt_alien *cnflt = container_of(work,
						     struct uwb_cnflt_alien,
						     cnflt_update_work);
	struct uwb_cnflt_alien *c;
	struct uwb_rc *rc = cnflt->rc;

	unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;

	mutex_lock(&rc->rsvs_mutex);

	list_del(&cnflt->rc_node);

	/* update rc global conflicting alien bitmap */
	bitmap_zero(rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS);

	list_for_each_entry(c, &rc->cnflt_alien_list, rc_node) {
		bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, c->mas.bm, UWB_NUM_MAS);
	}

	queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us));

	kfree(cnflt);
	mutex_unlock(&rc->rsvs_mutex);
}

static void uwb_cnflt_timer(unsigned long arg)
{
	struct uwb_cnflt_alien *cnflt = (struct uwb_cnflt_alien *)arg;

	queue_work(cnflt->rc->rsv_workq, &cnflt->cnflt_update_work);
}

/*
 * We have received a DRP IE of type Alien BP and we need to make
 * sure we do not transmit in conflicting MASs.
 */
static void uwb_drp_handle_alien_drp(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_mas_bm mas;
	struct uwb_cnflt_alien *cnflt;
	char buf[72];
	unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;

	uwb_drp_ie_to_bm(&mas, drp_ie);
	bitmap_scnprintf(buf, sizeof(buf), mas.bm, UWB_NUM_MAS);

	list_for_each_entry(cnflt, &rc->cnflt_alien_list, rc_node) {
		if (bitmap_equal(cnflt->mas.bm, mas.bm, UWB_NUM_MAS)) {
			/* Existing alien BP reservation conflicting
			 * bitmap, just reset the timer */
			uwb_cnflt_alien_stroke_timer(cnflt);
			return;
		}
	}

	/* New alien BP reservation conflicting bitmap */

	/* alloc and initialize new uwb_cnflt_alien */
	cnflt = kzalloc(sizeof(struct uwb_cnflt_alien), GFP_KERNEL);
	if (!cnflt) {
		dev_err(dev, "failed to alloc uwb_cnflt_alien struct\n");
		return;
	}

	INIT_LIST_HEAD(&cnflt->rc_node);
	init_timer(&cnflt->timer);
	cnflt->timer.function = uwb_cnflt_timer;
	cnflt->timer.data = (unsigned long)cnflt;

	cnflt->rc = rc;
	INIT_WORK(&cnflt->cnflt_update_work, uwb_cnflt_update_work);

	bitmap_copy(cnflt->mas.bm, mas.bm, UWB_NUM_MAS);

	list_add_tail(&cnflt->rc_node, &rc->cnflt_alien_list);

	/* update rc global conflicting alien bitmap */
	bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, mas.bm, UWB_NUM_MAS);

	queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us));

	/* start the timer */
	uwb_cnflt_alien_stroke_timer(cnflt);
}

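/*
 * A DRP IE for a reservation we are not a member of may still conflict
 * with our own reservations, so run conflict resolution against all of
 * them.
 */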
static void uwb_drp_process_not_involved(struct uwb_rc *rc,
					 struct uwb_rc_evt_drp *drp_evt,
					 struct uwb_ie_drp *drp_ie)
{
	struct uwb_mas_bm mas;

	uwb_drp_ie_to_bm(&mas, drp_ie);
	uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas);
}

static void uwb_drp_process_involved(struct uwb_rc *rc, struct uwb_dev *src,
				     struct uwb_rc_evt_drp *drp_evt,
				     struct uwb_ie_drp *drp_ie)
{
	struct uwb_rsv *rsv;

	rsv = uwb_rsv_find(rc, src, drp_ie);
	if (!rsv) {
		/*
		 * No reservation?  It's either for a recently
		 * terminated reservation, or the DRP IE couldn't be
		 * processed (e.g., an invalid IE or out of memory).
		 */
		return;
	}

	/*
	 * Do nothing with DRP IEs for reservations that have been
	 * terminated.
	 */
	if (rsv->state == UWB_RSV_STATE_NONE) {
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
		return;
	}

	if (uwb_ie_drp_owner(drp_ie))
		uwb_drp_process_target(rc, rsv, drp_ie, drp_evt);
	else
		uwb_drp_process_owner(rc, rsv, src, drp_ie, drp_evt);

}


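/* A DRP IE involves us if the DevAddr it carries is our own. */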
static bool uwb_drp_involves_us(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie)
{
	return uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, &drp_ie->dev_addr) == 0;
}

/*
 * Process a received DRP IE.
 */
static void uwb_drp_process(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
			    struct uwb_dev *src, struct uwb_ie_drp *drp_ie)
{
	if (uwb_ie_drp_type(drp_ie) == UWB_DRP_TYPE_ALIEN_BP)
		uwb_drp_handle_alien_drp(rc, drp_ie);
	else if (uwb_drp_involves_us(rc, drp_ie))
		uwb_drp_process_involved(rc, src, drp_evt, drp_ie);
	else
		uwb_drp_process_not_involved(rc, drp_evt, drp_ie);
}

/*
 * Process a received DRP Availability IE
 */
static void uwb_drp_availability_process(struct uwb_rc *rc, struct uwb_dev *src,
					 struct uwb_ie_drp_avail *drp_availability_ie)
{
	bitmap_copy(src->last_availability_bm,
		    drp_availability_ie->bmp, UWB_NUM_MAS);
}

/*
 * Process all the DRP IEs (both DRP IEs and the DRP Availability IE)
 * from a device.
 */
static
void uwb_drp_process_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
			 size_t ielen, struct uwb_dev *src_dev)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_ie_hdr *ie_hdr;
	void *ptr;

	ptr = drp_evt->ie_data;
	for (;;) {
		ie_hdr = uwb_ie_next(&ptr, &ielen);
		if (!ie_hdr)
			break;

		switch (ie_hdr->element_id) {
		case UWB_IE_DRP_AVAILABILITY:
			uwb_drp_availability_process(rc, src_dev, (struct uwb_ie_drp_avail *)ie_hdr);
			break;
		case UWB_IE_DRP:
			uwb_drp_process(rc, drp_evt, src_dev, (struct uwb_ie_drp *)ie_hdr);
			break;
		default:
			dev_warn(dev, "unexpected IE in DRP notification\n");
			break;
		}
	}

	if (ielen > 0)
		dev_warn(dev, "%d octets remaining in DRP notification\n",
			 (int)ielen);
}

/**
 * uwbd_evt_handle_rc_drp - handle a DRP_IE event
 * @evt: the DRP_IE event from the radio controller
 *
 * This processes DRP notifications from the radio controller, either
 * initiating a new reservation or transitioning an existing
 * reservation into a different state.
 *
 * DRP notifications can occur for three different reasons:
 *
 * - UWB_DRP_NOTIF_DRP_IE_RECVD: one or more DRP IEs with the RC as
 *   the target or source have been received.
 *
 *   These DRP IEs could be new or for an existing reservation.
 *
 *   If the DRP IE for an existing reservation ceases to be
 *   received for at least mMaxLostBeacons, the reservation should be
 *   considered to be terminated.  Note that the TERMINATE reason (see
 *   below) may not always be signalled (e.g., the remote device has
 *   two or more reservations established with the RC).
 *
 * - UWB_DRP_NOTIF_CONFLICT: DRP IEs from any device in the beacon
 *   group conflict with the RC's reservations.
 *
 * - UWB_DRP_NOTIF_TERMINATE: DRP IEs are no longer being received
 *   from a device (i.e., it has terminated all its reservations).
 *
 * Only the software state of the reservations is changed; the setting
 * of the radio controller's DRP IEs is done after all the events in
 * an event buffer are processed.  This saves waiting multiple times
 * for the SET_DRP_IE command to complete.
 */
int uwbd_evt_handle_rc_drp(struct uwb_event *evt)
{
	struct device *dev = &evt->rc->uwb_dev.dev;
	struct uwb_rc *rc = evt->rc;
	struct uwb_rc_evt_drp *drp_evt;
	size_t ielength, bytes_left;
	struct uwb_dev_addr src_addr;
	struct uwb_dev *src_dev;

	/* Is there enough data to decode the event (and any IEs in
	   its payload)? */
	if (evt->notif.size < sizeof(*drp_evt)) {
		dev_err(dev, "DRP event: Not enough data to decode event "
			"[%zu bytes left, %zu needed]\n",
			evt->notif.size, sizeof(*drp_evt));
		return 0;
	}
	bytes_left = evt->notif.size - sizeof(*drp_evt);
	drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp, rceb);
	ielength = le16_to_cpu(drp_evt->ie_length);
	if (bytes_left != ielength) {
		dev_err(dev, "DRP event: Not enough data in payload [%zu "
			"bytes left, %zu declared in the event]\n",
			bytes_left, ielength);
		return 0;
	}

	memcpy(src_addr.data, &drp_evt->src_addr, sizeof(src_addr));
	src_dev = uwb_dev_get_by_devaddr(rc, &src_addr);
	if (!src_dev) {
		/*
		 * A DRP notification from an unrecognized device.
		 *
		 * This is probably from a WUSB device that doesn't
		 * have an EUI-48 and therefore doesn't show up in the
		 * UWB device database.  It's safe to simply ignore
		 * these.
		 */
		return 0;
	}

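	/* Reservation state is protected by rsvs_mutex; the radio
	 * controller's DRP IEs are updated later, after the whole event
	 * buffer has been processed (see the comment above). */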
	mutex_lock(&rc->rsvs_mutex);

	/* We do not distinguish based on the notification reason. */
	uwb_drp_process_all(rc, drp_evt, ielength, src_dev);

	mutex_unlock(&rc->rsvs_mutex);

	uwb_dev_put(src_dev);
	return 0;
}