/*
 * Ultra Wide Band
 * Dynamic Reservation Protocol handling
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include "uwb-internal.h"


/* DRP Conflict Actions ([ECMA-368 2nd Edition] 17.4.6) */
enum uwb_drp_conflict_action {
	/* Reservation is maintained, no action needed */
	UWB_DRP_CONFLICT_MANTAIN = 0,

	/* the device shall not transmit frames in conflicting MASs in
	 * the following superframe. If the device is the reservation
	 * target, it shall also set the Reason Code in its DRP IE to
	 * Conflict in its beacon in the following superframe.
	 */
	UWB_DRP_CONFLICT_ACT1,

	/* the device shall not set the Reservation Status bit to ONE
	 * and shall not transmit frames in conflicting MASs. If the
	 * device is the reservation target, it shall also set the
	 * Reason Code in its DRP IE to Conflict.
	 */
	UWB_DRP_CONFLICT_ACT2,

	/* the device shall not transmit frames in conflicting MASs in
	 * the following superframe. It shall remove the conflicting
	 * MASs from the reservation or set the Reservation Status to
	 * ZERO in its beacon in the following superframe. If the
	 * device is the reservation target, it shall also set the
	 * Reason Code in its DRP IE to Conflict.
	 */
	UWB_DRP_CONFLICT_ACT3,
};


static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg,
		struct uwb_rceb *reply, ssize_t reply_size)
{
	struct uwb_rc_evt_set_drp_ie *r = (struct uwb_rc_evt_set_drp_ie *)reply;
	unsigned long flags;

	if (r != NULL) {
		if (r->bResultCode != UWB_RC_RES_SUCCESS)
			dev_err(&rc->uwb_dev.dev, "SET-DRP-IE failed: %s (%d)\n",
				uwb_rc_strerror(r->bResultCode), r->bResultCode);
	} else
		dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n");

	spin_lock_irqsave(&rc->rsvs_lock, flags);
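	/*
	 * A set_drp_ie_pending value greater than one indicates that
	 * another SET-DRP-IE request was made while this command was
	 * still in flight, so queue a further reservation update to
	 * send the latest DRP IEs.
	 */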
	if (rc->set_drp_ie_pending > 1) {
		rc->set_drp_ie_pending = 0;
		uwb_rsv_queue_update(rc);
	} else {
		rc->set_drp_ie_pending = 0;
	}
	spin_unlock_irqrestore(&rc->rsvs_lock, flags);
}

/**
 * Construct and send the SET DRP IE
 *
 * @rc:      UWB Host controller
 * @returns: >= 0 number of bytes still available in the beacon
 *           < 0 errno code on error.
 *
 * See WUSB[8.6.2.7]: The host must set all the DRP IEs that it wants the
 * device to include in its beacon at the same time. We thus have to
 * traverse all reservations and include the DRP IEs of all PENDING
 * and NEGOTIATED reservations in a SET DRP command for transmission.
 *
 * A DRP Availability IE is appended.
 *
 * rc->rsvs_mutex is held
 *
 * FIXME We currently ignore the returned value indicating the remaining space
 * in beacon. This could be used to deny reservation requests earlier if
 * determined that they would cause the beacon space to be exceeded.
 */
int uwb_rc_send_all_drp_ie(struct uwb_rc *rc)
{
	int result;
	struct uwb_rc_cmd_set_drp_ie *cmd;
	struct uwb_rsv *rsv;
	struct uwb_rsv_move *mv;
	int num_bytes = 0;
	u8 *IEDataptr;

	result = -ENOMEM;
	/* First traverse all reservations to determine memory needed. */
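	/*
	 * Each DRP IE occupies its body length plus 2 octets for the
	 * Element ID and Length fields of the IE header.
	 */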
	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->drp_ie != NULL) {
			num_bytes += rsv->drp_ie->hdr.length + 2;
			if (uwb_rsv_has_two_drp_ies(rsv) &&
			    (rsv->mv.companion_drp_ie != NULL)) {
				mv = &rsv->mv;
				num_bytes +=
					mv->companion_drp_ie->hdr.length + 2;
			}
		}
	}
	num_bytes += sizeof(rc->drp_avail.ie);
	cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL);
	if (cmd == NULL)
		goto error;
	cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
	cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_DRP_IE);
	cmd->wIELength = num_bytes;
	IEDataptr = (u8 *)&cmd->IEData[0];

	/* FIXME: DRP avail IE is not always needed */
	/* put DRP avail IE first */
	memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie));
	IEDataptr += sizeof(struct uwb_ie_drp_avail);

	/* Next traverse all reservations to place IEs in allocated memory. */
	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->drp_ie != NULL) {
			memcpy(IEDataptr, rsv->drp_ie,
			       rsv->drp_ie->hdr.length + 2);
			IEDataptr += rsv->drp_ie->hdr.length + 2;

			if (uwb_rsv_has_two_drp_ies(rsv) &&
			    (rsv->mv.companion_drp_ie != NULL)) {
				mv = &rsv->mv;
				memcpy(IEDataptr, mv->companion_drp_ie,
				       mv->companion_drp_ie->hdr.length + 2);
				IEDataptr +=
					mv->companion_drp_ie->hdr.length + 2;
			}
		}
	}

	result = uwb_rc_cmd_async(rc, "SET-DRP-IE",
			&cmd->rccb, sizeof(*cmd) + num_bytes,
			UWB_RC_CET_GENERAL, UWB_RC_CMD_SET_DRP_IE,
			uwb_rc_set_drp_cmd_done, NULL);

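	/*
	 * A SET-DRP-IE command is now outstanding; the completion
	 * handler clears this flag and queues a further update if one
	 * was requested in the meantime.
	 */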
	rc->set_drp_ie_pending = 1;

	kfree(cmd);
error:
	return result;
}

/*
 * Evaluate the action to perform using conflict resolution rules
 *
 * Return a uwb_drp_conflict_action.
 */
static int evaluate_conflict_action(struct uwb_ie_drp *ext_drp_ie, int ext_beacon_slot,
				    struct uwb_rsv *rsv, int our_status)
{
	int our_tie_breaker = rsv->tiebreaker;
	int our_type        = rsv->type;
	int our_beacon_slot = rsv->rc->uwb_dev.beacon_slot;

	int ext_tie_breaker = uwb_ie_drp_tiebreaker(ext_drp_ie);
	int ext_status      = uwb_ie_drp_status(ext_drp_ie);
	int ext_type        = uwb_ie_drp_type(ext_drp_ie);

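	/*
	 * The checks below apply the conflict resolution rules of
	 * [ECMA-368 2nd Edition] 17.4.6 in order; the first rule that
	 * matches determines the action.
	 */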
	/* [ECMA-368 2nd Edition] 17.4.6 */
	if (ext_type == UWB_DRP_TYPE_PCA && our_type == UWB_DRP_TYPE_PCA) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-1 */
	if (our_type == UWB_DRP_TYPE_ALIEN_BP) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-2 */
	if (ext_type == UWB_DRP_TYPE_ALIEN_BP) {
		/* here we know our_type != UWB_DRP_TYPE_ALIEN_BP */
		return UWB_DRP_CONFLICT_ACT1;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-3 */
	if (our_status == 0 && ext_status == 1) {
		return UWB_DRP_CONFLICT_ACT2;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-4 */
	if (our_status == 1 && ext_status == 0) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-5a */
	if (our_tie_breaker == ext_tie_breaker &&
	    our_beacon_slot < ext_beacon_slot) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-5b */
	if (our_tie_breaker != ext_tie_breaker &&
	    our_beacon_slot > ext_beacon_slot) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	if (our_status == 0) {
		if (our_tie_breaker == ext_tie_breaker) {
			/* [ECMA-368 2nd Edition] 17.4.6-6a */
			if (our_beacon_slot > ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT2;
			}
		} else {
			/* [ECMA-368 2nd Edition] 17.4.6-6b */
			if (our_beacon_slot < ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT2;
			}
		}
	} else {
		if (our_tie_breaker == ext_tie_breaker) {
			/* [ECMA-368 2nd Edition] 17.4.6-7a */
			if (our_beacon_slot > ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT3;
			}
		} else {
			/* [ECMA-368 2nd Edition] 17.4.6-7b */
			if (our_beacon_slot < ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT3;
			}
		}
	}
	return UWB_DRP_CONFLICT_MANTAIN;
}

static void handle_conflict_normal(struct uwb_ie_drp *drp_ie,
		int ext_beacon_slot,
		struct uwb_rsv *rsv,
		struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_rsv_move *mv = &rsv->mv;
	struct uwb_drp_backoff_win *bow = &rc->bow;
	int action;

	action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, uwb_rsv_status(rsv));

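	/*
	 * The reservation owner may try to move or shrink the
	 * reservation; the target merely flags the conflict and waits
	 * for the owner to resolve it.
	 */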
	if (uwb_rsv_is_owner(rsv)) {
		switch (action) {
		case UWB_DRP_CONFLICT_ACT2:
			/* try move */
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_TO_BE_MOVED);
			if (bow->can_reserve_extra_mases == false)
				uwb_rsv_backoff_win_increment(rc);

			break;
		case UWB_DRP_CONFLICT_ACT3:
			uwb_rsv_backoff_win_increment(rc);
			/* drop some mases with reason modified */
			/* put in the companion the mases to be dropped */
			bitmap_and(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS);
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
		default:
			break;
		}
	} else {
		switch (action) {
		case UWB_DRP_CONFLICT_ACT2:
		case UWB_DRP_CONFLICT_ACT3:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
		default:
			break;
		}
	}
}

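/*
 * Resolve a conflict against a reservation that is being moved or
 * expanded (i.e. it has a companion DRP IE).  @companion_only is true
 * when only the companion (newly requested) MASs conflict; otherwise
 * the established part of the reservation conflicts as well.
 */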
static void handle_conflict_expanding(struct uwb_ie_drp *drp_ie, int ext_beacon_slot,
				      struct uwb_rsv *rsv, bool companion_only,
				      struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_drp_backoff_win *bow = &rc->bow;
	struct uwb_rsv_move *mv = &rsv->mv;
	int action;

	if (companion_only) {
		/* status of companion is 0 at this point */
		action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, 0);
		if (uwb_rsv_is_owner(rsv)) {
			switch (action) {
			case UWB_DRP_CONFLICT_ACT2:
			case UWB_DRP_CONFLICT_ACT3:
				uwb_rsv_set_state(rsv,
						UWB_RSV_STATE_O_ESTABLISHED);
				rsv->needs_release_companion_mas = false;
				if (bow->can_reserve_extra_mases == false)
					uwb_rsv_backoff_win_increment(rc);
				uwb_drp_avail_release(rsv->rc,
						&rsv->mv.companion_mas);
			}
		} else { /* rsv is target */
			switch (action) {
			case UWB_DRP_CONFLICT_ACT2:
			case UWB_DRP_CONFLICT_ACT3:
				uwb_rsv_set_state(rsv,
					UWB_RSV_STATE_T_EXPANDING_CONFLICT);
				/* send_drp_avail_ie = true; */
			}
		}
	} else { /* also base part of the reservation is conflicting */
		if (uwb_rsv_is_owner(rsv)) {
			uwb_rsv_backoff_win_increment(rc);
			/* remove companion part */
			uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);

			/* drop some mases with reason modified */

			/* put in the companion the mases to be dropped */
			bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm,
					conflicting_mas->bm, UWB_NUM_MAS);
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
		} else { /* it is a target rsv */
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
			/* send_drp_avail_ie = true; */
		}
	}
}

static void uwb_drp_handle_conflict_rsv(struct uwb_rc *rc, struct uwb_rsv *rsv,
					struct uwb_rc_evt_drp *drp_evt,
					struct uwb_ie_drp *drp_ie,
					struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rsv_move *mv;

	/* check if the conflicting reservation has two drp_ies */
	if (uwb_rsv_has_two_drp_ies(rsv)) {
		mv = &rsv->mv;
		if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm,
					UWB_NUM_MAS)) {
			handle_conflict_expanding(drp_ie,
						drp_evt->beacon_slot_number,
						rsv, false, conflicting_mas);
		} else {
			if (bitmap_intersects(mv->companion_mas.bm,
					conflicting_mas->bm, UWB_NUM_MAS)) {
				handle_conflict_expanding(
					drp_ie, drp_evt->beacon_slot_number,
					rsv, true, conflicting_mas);
			}
		}
	} else if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm,
				UWB_NUM_MAS)) {
		handle_conflict_normal(drp_ie, drp_evt->beacon_slot_number,
				rsv, conflicting_mas);
	}
}

static void uwb_drp_handle_all_conflict_rsv(struct uwb_rc *rc,
					struct uwb_rc_evt_drp *drp_evt,
					struct uwb_ie_drp *drp_ie,
					struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rsv *rsv;

	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie,
					conflicting_mas);
	}
}

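/*
 * A target reservation has received a DRP IE with reason code
 * ACCEPTED: update its state, accepting the extra MASs of an
 * expanding reservation if they do not clash with our availability.
 */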
static void uwb_drp_process_target_accepted(struct uwb_rc *rc,
	struct uwb_rsv *rsv, struct uwb_rc_evt_drp *drp_evt,
	struct uwb_ie_drp *drp_ie, struct uwb_mas_bm *mas)
{
	struct uwb_rsv_move *mv = &rsv->mv;
	int status;

	status = uwb_ie_drp_status(drp_ie);

	if (rsv->state == UWB_RSV_STATE_T_CONFLICT) {
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
		return;
	}

	if (rsv->state == UWB_RSV_STATE_T_EXPANDING_ACCEPTED) {
		/* drp_ie is companion */
		if (!bitmap_equal(rsv->mas.bm, mas->bm, UWB_NUM_MAS)) {
			/* stroke companion */
			uwb_rsv_set_state(rsv,
				UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
		}
	} else {
		if (!bitmap_equal(rsv->mas.bm, mas->bm, UWB_NUM_MAS)) {
			if (uwb_drp_avail_reserve_pending(rc, mas) == -EBUSY) {
				/* FIXME: there is a conflict, find
				 * the conflicting reservations and
				 * take a sensible action. Consider
				 * that in drp_ie there is the
				 * "neighbour" */
				uwb_drp_handle_all_conflict_rsv(rc, drp_evt,
						drp_ie, mas);
			} else {
				/* accept the extra reservation */
				bitmap_copy(mv->companion_mas.bm, mas->bm,
						UWB_NUM_MAS);
				uwb_rsv_set_state(rsv,
					UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
			}
		} else {
			if (status) {
				uwb_rsv_set_state(rsv,
					UWB_RSV_STATE_T_ACCEPTED);
			}
		}
	}
}

/*
 * Based on the DRP IE, transition a target reservation to a new
 * state.
 */
static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv,
		struct uwb_ie_drp *drp_ie, struct uwb_rc_evt_drp *drp_evt)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rsv_move *mv = &rsv->mv;
	int status;
	enum uwb_drp_reason reason_code;
	struct uwb_mas_bm mas;

	status = uwb_ie_drp_status(drp_ie);
	reason_code = uwb_ie_drp_reason_code(drp_ie);
	uwb_drp_ie_to_bm(&mas, drp_ie);

	switch (reason_code) {
	case UWB_DRP_REASON_ACCEPTED:
		uwb_drp_process_target_accepted(rc, rsv, drp_evt, drp_ie, &mas);
		break;

	case UWB_DRP_REASON_MODIFIED:
		/* check to see if we have already modified the reservation */
		if (bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
			break;
		}

		/* find if the owner wants to expand or reduce */
		if (bitmap_subset(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) {
			/* owner is reducing */
			bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mas.bm,
				UWB_NUM_MAS);
			uwb_drp_avail_release(rsv->rc, &mv->companion_mas);
		}

		bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS);
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_RESIZED);
		break;
	default:
		dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
			 reason_code, status);
	}
}

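/*
 * A DRP IE from the target carries reason code ACCEPTED: advance the
 * owner-side reservation state machine, in particular through the
 * move sequence (EXPANDING -> COMBINING -> REDUCING -> ESTABLISHED).
 */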
static void uwb_drp_process_owner_accepted(struct uwb_rsv *rsv,
	struct uwb_mas_bm *mas)
{
	struct uwb_rsv_move *mv = &rsv->mv;

	switch (rsv->state) {
	case UWB_RSV_STATE_O_PENDING:
	case UWB_RSV_STATE_O_INITIATED:
	case UWB_RSV_STATE_O_ESTABLISHED:
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
		break;
	case UWB_RSV_STATE_O_MODIFIED:
		if (bitmap_equal(mas->bm, rsv->mas.bm, UWB_NUM_MAS))
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
		else
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
		break;

	case UWB_RSV_STATE_O_MOVE_REDUCING: /* shouldn't be a problem */
		if (bitmap_equal(mas->bm, rsv->mas.bm, UWB_NUM_MAS))
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
		else
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
		break;
	case UWB_RSV_STATE_O_MOVE_EXPANDING:
		if (bitmap_equal(mas->bm, mv->companion_mas.bm, UWB_NUM_MAS)) {
			/* Companion reservation accepted */
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
		} else {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
		}
		break;
	case UWB_RSV_STATE_O_MOVE_COMBINING:
		if (bitmap_equal(mas->bm, rsv->mas.bm, UWB_NUM_MAS))
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
		else
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
		break;
	default:
		break;
	}
}

/*
 * Based on the DRP IE, transition an owner reservation to a new
 * state.
 */
static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv,
				  struct uwb_dev *src, struct uwb_ie_drp *drp_ie,
				  struct uwb_rc_evt_drp *drp_evt)
{
	struct device *dev = &rc->uwb_dev.dev;
	int status;
	enum uwb_drp_reason reason_code;
	struct uwb_mas_bm mas;

	status = uwb_ie_drp_status(drp_ie);
	reason_code = uwb_ie_drp_reason_code(drp_ie);
	uwb_drp_ie_to_bm(&mas, drp_ie);

	if (status) {
		switch (reason_code) {
		case UWB_DRP_REASON_ACCEPTED:
			uwb_drp_process_owner_accepted(rsv, &mas);
			break;
		default:
			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
				 reason_code, status);
		}
	} else {
		switch (reason_code) {
		case UWB_DRP_REASON_PENDING:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_PENDING);
			break;
		case UWB_DRP_REASON_DENIED:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
			break;
		case UWB_DRP_REASON_CONFLICT:
			/* resolve the conflict */
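			/*
			 * The conflicting MASs are taken to be those
			 * the source device reports as unavailable,
			 * i.e. the complement of its last DRP
			 * Availability bitmap.
			 */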
			bitmap_complement(mas.bm, src->last_availability_bm,
					  UWB_NUM_MAS);
			uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, &mas);
			break;
		default:
			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
				 reason_code, status);
		}
	}
}

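/* (Re)arm the alien BP conflict timer for UWB_MAX_LOST_BEACONS superframes. */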
static void uwb_cnflt_alien_stroke_timer(struct uwb_cnflt_alien *cnflt)
{
	unsigned timeout_us = UWB_MAX_LOST_BEACONS * UWB_SUPERFRAME_LENGTH_US;

	mod_timer(&cnflt->timer, jiffies + usecs_to_jiffies(timeout_us));
}

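/*
 * Deferred handling of an expired alien BP conflict: the conflicting
 * DRP IE has not been seen for UWB_MAX_LOST_BEACONS superframes, so
 * drop its entry and recompute the radio controller's global
 * conflicting-MAS bitmap from the remaining entries.
 */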
static void uwb_cnflt_update_work(struct work_struct *work)
{
	struct uwb_cnflt_alien *cnflt = container_of(work,
						     struct uwb_cnflt_alien,
						     cnflt_update_work);
	struct uwb_cnflt_alien *c;
	struct uwb_rc *rc = cnflt->rc;

	unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;

	mutex_lock(&rc->rsvs_mutex);

	list_del(&cnflt->rc_node);

	/* update rc global conflicting alien bitmap */
	bitmap_zero(rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS);

	list_for_each_entry(c, &rc->cnflt_alien_list, rc_node) {
		bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm,
			c->mas.bm, UWB_NUM_MAS);
	}

	queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work,
			usecs_to_jiffies(delay_us));

	kfree(cnflt);
	mutex_unlock(&rc->rsvs_mutex);
}

static void uwb_cnflt_timer(unsigned long arg)
{
	struct uwb_cnflt_alien *cnflt = (struct uwb_cnflt_alien *)arg;

	queue_work(cnflt->rc->rsv_workq, &cnflt->cnflt_update_work);
}

/*
 * We have received a DRP IE of type Alien BP and we need to make
 * sure we do not transmit in conflicting MASs.
 */
static void uwb_drp_handle_alien_drp(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_mas_bm mas;
	struct uwb_cnflt_alien *cnflt;
	unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;

	uwb_drp_ie_to_bm(&mas, drp_ie);

	list_for_each_entry(cnflt, &rc->cnflt_alien_list, rc_node) {
		if (bitmap_equal(cnflt->mas.bm, mas.bm, UWB_NUM_MAS)) {
			/* Existing alien BP reservation conflicting
			 * bitmap, just reset the timer */
			uwb_cnflt_alien_stroke_timer(cnflt);
			return;
		}
	}

	/* New alien BP reservation conflicting bitmap */

	/* alloc and initialize new uwb_cnflt_alien */
	cnflt = kzalloc(sizeof(struct uwb_cnflt_alien), GFP_KERNEL);
	if (!cnflt) {
		dev_err(dev, "failed to alloc uwb_cnflt_alien struct\n");
		return;
	}

	INIT_LIST_HEAD(&cnflt->rc_node);
	setup_timer(&cnflt->timer, uwb_cnflt_timer, (unsigned long)cnflt);

	cnflt->rc = rc;
	INIT_WORK(&cnflt->cnflt_update_work, uwb_cnflt_update_work);

	bitmap_copy(cnflt->mas.bm, mas.bm, UWB_NUM_MAS);

	list_add_tail(&cnflt->rc_node, &rc->cnflt_alien_list);

	/* update rc global conflicting alien bitmap */
	bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, mas.bm, UWB_NUM_MAS);

	queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us));

	/* start the timer */
	uwb_cnflt_alien_stroke_timer(cnflt);
}

static void uwb_drp_process_not_involved(struct uwb_rc *rc,
		struct uwb_rc_evt_drp *drp_evt,
		struct uwb_ie_drp *drp_ie)
{
	struct uwb_mas_bm mas;

	uwb_drp_ie_to_bm(&mas, drp_ie);
	uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas);
}

static void uwb_drp_process_involved(struct uwb_rc *rc, struct uwb_dev *src,
				     struct uwb_rc_evt_drp *drp_evt,
				     struct uwb_ie_drp *drp_ie)
{
	struct uwb_rsv *rsv;

	rsv = uwb_rsv_find(rc, src, drp_ie);
	if (!rsv) {
		/*
		 * No reservation? It's either for a recently
		 * terminated reservation; or the DRP IE couldn't be
		 * processed (e.g., an invalid IE or out of memory).
		 */
		return;
	}

	/*
	 * Do nothing with DRP IEs for reservations that have been
	 * terminated.
	 */
	if (rsv->state == UWB_RSV_STATE_NONE) {
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
		return;
	}

	if (uwb_ie_drp_owner(drp_ie))
		uwb_drp_process_target(rc, rsv, drp_ie, drp_evt);
	else
		uwb_drp_process_owner(rc, rsv, src, drp_ie, drp_evt);
}


static bool uwb_drp_involves_us(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie)
{
	return uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, &drp_ie->dev_addr) == 0;
}

/*
 * Process a received DRP IE.
 */
static void uwb_drp_process(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
			    struct uwb_dev *src, struct uwb_ie_drp *drp_ie)
{
	if (uwb_ie_drp_type(drp_ie) == UWB_DRP_TYPE_ALIEN_BP)
		uwb_drp_handle_alien_drp(rc, drp_ie);
	else if (uwb_drp_involves_us(rc, drp_ie))
		uwb_drp_process_involved(rc, src, drp_evt, drp_ie);
	else
		uwb_drp_process_not_involved(rc, drp_evt, drp_ie);
}

/*
 * Process a received DRP Availability IE.
 */
static void uwb_drp_availability_process(struct uwb_rc *rc, struct uwb_dev *src,
					 struct uwb_ie_drp_avail *drp_availability_ie)
{
	bitmap_copy(src->last_availability_bm,
		    drp_availability_ie->bmp, UWB_NUM_MAS);
}

/*
 * Process all the DRP IEs (both DRP IEs and the DRP Availability IE)
 * from a device.
 */
static
void uwb_drp_process_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
			 size_t ielen, struct uwb_dev *src_dev)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_ie_hdr *ie_hdr;
	void *ptr;

	ptr = drp_evt->ie_data;
	for (;;) {
		ie_hdr = uwb_ie_next(&ptr, &ielen);
		if (!ie_hdr)
			break;

		switch (ie_hdr->element_id) {
		case UWB_IE_DRP_AVAILABILITY:
			uwb_drp_availability_process(rc, src_dev, (struct uwb_ie_drp_avail *)ie_hdr);
			break;
		case UWB_IE_DRP:
			uwb_drp_process(rc, drp_evt, src_dev, (struct uwb_ie_drp *)ie_hdr);
			break;
		default:
			dev_warn(dev, "unexpected IE in DRP notification\n");
			break;
		}
	}

	if (ielen > 0)
		dev_warn(dev, "%d octets remaining in DRP notification\n",
			 (int)ielen);
}

/**
 * uwbd_evt_handle_rc_drp - handle a DRP_IE event
 * @evt: the DRP_IE event from the radio controller
 *
 * This processes DRP notifications from the radio controller, either
 * initiating a new reservation or transitioning an existing
 * reservation into a different state.
 *
 * DRP notifications can occur for three different reasons:
 *
 * - UWB_DRP_NOTIF_DRP_IE_RECVD: one or more DRP IEs with the RC as
 *   the target or source have been received.
 *
 *   These DRP IEs could be new or for an existing reservation.
 *
 *   If the DRP IE for an existing reservation ceases to be
 *   received for at least mMaxLostBeacons, the reservation should be
 *   considered to be terminated. Note that the TERMINATE reason (see
 *   below) may not always be signalled (e.g., the remote device has
 *   two or more reservations established with the RC).
 *
 * - UWB_DRP_NOTIF_CONFLICT: DRP IEs from any device in the beacon
 *   group conflict with the RC's reservations.
 *
 * - UWB_DRP_NOTIF_TERMINATE: DRP IEs are no longer being received
 *   from a device (i.e., it's terminated all reservations).
 *
 * Only the software state of the reservations is changed; the setting
 * of the radio controller's DRP IEs is done after all the events in
 * an event buffer are processed. This saves waiting multiple times
 * for the SET_DRP_IE command to complete.
 */
int uwbd_evt_handle_rc_drp(struct uwb_event *evt)
{
	struct device *dev = &evt->rc->uwb_dev.dev;
	struct uwb_rc *rc = evt->rc;
	struct uwb_rc_evt_drp *drp_evt;
	size_t ielength, bytes_left;
	struct uwb_dev_addr src_addr;
	struct uwb_dev *src_dev;

	/* Is there enough data to decode the event (and any IEs in
	   its payload)? */
	if (evt->notif.size < sizeof(*drp_evt)) {
		dev_err(dev, "DRP event: Not enough data to decode event "
			"[%zu bytes left, %zu needed]\n",
			evt->notif.size, sizeof(*drp_evt));
		return 0;
	}
	bytes_left = evt->notif.size - sizeof(*drp_evt);
	drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp, rceb);
	ielength = le16_to_cpu(drp_evt->ie_length);
	if (bytes_left != ielength) {
		dev_err(dev, "DRP event: Not enough data in payload [%zu "
825 "bytes left, %zu declared in the event]\n",
826 bytes_left, ielength);
827 return 0;
828 }
829
830 memcpy(src_addr.data, &drp_evt->src_addr, sizeof(src_addr));
831 src_dev = uwb_dev_get_by_devaddr(rc, &src_addr);
832 if (!src_dev) {
833 /*
834 * A DRP notification from an unrecognized device.
835 *
836 * This is probably from a WUSB device that doesn't
837 * have an EUI-48 and therefore doesn't show up in the
838 * UWB device database. It's safe to simply ignore
839 * these.
840 */
841 return 0;
842 }
843
844 mutex_lock(&rc->rsvs_mutex);
845
	/* We do not distinguish based on the reason for the notification. */
	uwb_drp_process_all(rc, drp_evt, ielength, src_dev);

	mutex_unlock(&rc->rsvs_mutex);

	uwb_dev_put(src_dev);
	return 0;
}