Hansong Zhang | b096076 | 2019-11-14 17:57:10 -0800 | [diff] [blame] | 1 | /* |
| 2 | * Copyright 2019 The Android Open Source Project |
| 3 | * |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at |
| 7 | * |
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | * |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | */ |
| 16 | |
| 17 | #include "l2cap/internal/enhanced_retransmission_mode_channel_data_controller.h" |
| 18 | |
Hansong Zhang | 6594194 | 2019-11-16 23:48:30 -0800 | [diff] [blame] | 19 | #include <map> |
| 20 | #include <queue> |
| 21 | #include <vector> |
| 22 | |
| 23 | #include "common/bind.h" |
| 24 | #include "os/alarm.h" |
| 25 | #include "packet/fragmenting_inserter.h" |
| 26 | #include "packet/raw_builder.h" |
| 27 | |
Hansong Zhang | b096076 | 2019-11-14 17:57:10 -0800 | [diff] [blame] | 28 | namespace bluetooth { |
| 29 | namespace l2cap { |
| 30 | namespace internal { |
// Constructs the ERTM data controller for one dynamic channel. Wires the channel's
// upper-queue end into an enqueue buffer and creates the pimpl state machine on the
// given handler. `scheduler` and `handler` are borrowed, not owned.
ErtmController::ErtmController(Cid cid, Cid remote_cid, UpperQueueDownEnd* channel_queue_end, os::Handler* handler,
                               Scheduler* scheduler)
    : cid_(cid), remote_cid_(remote_cid), enqueue_buffer_(channel_queue_end), handler_(handler), scheduler_(scheduler),
      pimpl_(std::make_unique<impl>(this, handler)) {}
Hansong Zhang | b096076 | 2019-11-14 17:57:10 -0800 | [diff] [blame] | 35 | |
Hansong Zhang | 6594194 | 2019-11-16 23:48:30 -0800 | [diff] [blame] | 36 | ErtmController::~ErtmController() = default; |
| 37 | |
| 38 | struct ErtmController::impl { |
| 39 | impl(ErtmController* controller, os::Handler* handler) |
| 40 | : controller_(controller), handler_(handler), retrans_timer_(handler), monitor_timer_(handler) {} |
| 41 | |
| 42 | ErtmController* controller_; |
| 43 | os::Handler* handler_; |
| 44 | |
| 45 | // We don't support extended window |
| 46 | static constexpr uint8_t kMaxTxWin = 64; |
| 47 | |
| 48 | // We don't support sending SREJ |
| 49 | static constexpr bool kSendSrej = false; |
| 50 | |
| 51 | // States (@see 8.6.5.2): Transmitter state and receiver state |
| 52 | |
| 53 | enum class TxState { |
| 54 | XMIT, |
| 55 | WAIT_F, |
| 56 | }; |
| 57 | TxState tx_state_ = TxState::XMIT; |
| 58 | |
| 59 | enum class RxState { |
| 60 | RECV, |
| 61 | REJ_SENT, |
| 62 | SREJ_SENT, |
| 63 | }; |
| 64 | RxState rx_state_ = RxState::RECV; |
| 65 | |
| 66 | // Variables and Timers (@see 8.6.5.3) |
| 67 | |
| 68 | uint8_t tx_seq_ = 0; |
| 69 | uint8_t next_tx_seq_ = 0; |
| 70 | uint8_t expected_ack_seq_ = 0; |
| 71 | uint8_t req_seq_ = 0; |
| 72 | uint8_t expected_tx_seq_ = 0; |
| 73 | uint8_t buffer_seq_ = 0; |
| 74 | |
| 75 | bool remote_busy_ = false; |
| 76 | bool local_busy_ = false; |
| 77 | int unacked_frames_ = 0; |
| 78 | // TODO: Instead of having a map, we may consider about a better data structure |
Hansong Zhang | a631253 | 2019-11-19 14:01:36 -0800 | [diff] [blame] | 79 | // Map from TxSeq to (SAR, SDU size for START packet, information payload) |
| 80 | std::map<uint8_t, std::tuple<SegmentationAndReassembly, uint16_t, std::shared_ptr<packet::RawBuilder>>> unacked_list_; |
| 81 | // Stores (SAR, SDU size for START packet, information payload) |
| 82 | std::queue<std::tuple<SegmentationAndReassembly, uint16_t, std::unique_ptr<packet::RawBuilder>>> pending_frames_; |
Hansong Zhang | 6594194 | 2019-11-16 23:48:30 -0800 | [diff] [blame] | 83 | int retry_count_ = 0; |
| 84 | std::map<uint8_t /* tx_seq, */, int /* count */> retry_i_frames_; |
| 85 | bool rnr_sent_ = false; |
| 86 | bool rej_actioned_ = false; |
| 87 | bool srej_actioned_ = false; |
| 88 | uint16_t srej_save_req_seq_ = 0; |
| 89 | bool send_rej_ = false; |
| 90 | int buffer_seq_srej_ = 0; |
| 91 | int frames_sent_ = 0; |
| 92 | os::Alarm retrans_timer_; |
| 93 | os::Alarm monitor_timer_; |
| 94 | |
| 95 | // Events (@see 8.6.5.4) |
| 96 | |
Hansong Zhang | a631253 | 2019-11-19 14:01:36 -0800 | [diff] [blame] | 97 | void data_request(SegmentationAndReassembly sar, std::unique_ptr<packet::RawBuilder> pdu, uint16_t sdu_size = 0) { |
| 98 | // Note: sdu_size only applies to START packet |
Hansong Zhang | 6594194 | 2019-11-16 23:48:30 -0800 | [diff] [blame] | 99 | if (tx_state_ == TxState::XMIT && !remote_busy() && rem_window_not_full()) { |
Hansong Zhang | a631253 | 2019-11-19 14:01:36 -0800 | [diff] [blame] | 100 | send_data(sar, sdu_size, std::move(pdu)); |
Hansong Zhang | 6594194 | 2019-11-16 23:48:30 -0800 | [diff] [blame] | 101 | } else if (tx_state_ == TxState::XMIT && (remote_busy() || rem_window_full())) { |
Hansong Zhang | a631253 | 2019-11-19 14:01:36 -0800 | [diff] [blame] | 102 | pend_data(sar, sdu_size, std::move(pdu)); |
Hansong Zhang | 6594194 | 2019-11-16 23:48:30 -0800 | [diff] [blame] | 103 | } else if (tx_state_ == TxState::WAIT_F) { |
Hansong Zhang | a631253 | 2019-11-19 14:01:36 -0800 | [diff] [blame] | 104 | pend_data(sar, sdu_size, std::move(pdu)); |
Hansong Zhang | 6594194 | 2019-11-16 23:48:30 -0800 | [diff] [blame] | 105 | } |
| 106 | } |
| 107 | |
| 108 | void local_busy_detected() { |
| 109 | local_busy_ = true; |
| 110 | } |
| 111 | |
| 112 | void local_busy_clear() { |
| 113 | if (tx_state_ == TxState::XMIT && rnr_sent()) { |
| 114 | local_busy_ = false; |
| 115 | rnr_sent_ = false; |
| 116 | send_rr(Poll::POLL); |
| 117 | retry_count_ = 1; |
| 118 | stop_retrans_timer(); |
| 119 | start_monitor_timer(); |
| 120 | } else if (tx_state_ == TxState::XMIT) { |
| 121 | local_busy_ = false; |
| 122 | rnr_sent_ = false; |
| 123 | } |
| 124 | } |
| 125 | |
| 126 | void recv_req_seq_and_f_bit(uint8_t req_seq, Final f) { |
| 127 | if (tx_state_ == TxState::XMIT) { |
| 128 | process_req_seq(req_seq); |
| 129 | } else if (f == Final::POLL_RESPONSE) { |
| 130 | process_req_seq(req_seq); |
| 131 | stop_monitor_timer(); |
| 132 | if (unacked_frames_ > 0) { |
| 133 | start_retrans_timer(); |
| 134 | } |
| 135 | tx_state_ = TxState::XMIT; |
| 136 | } else { |
| 137 | process_req_seq(req_seq); |
| 138 | } |
| 139 | } |
| 140 | |
| 141 | void recv_f_bit(Final f) { |
| 142 | if (tx_state_ == TxState::WAIT_F && f == Final::POLL_RESPONSE) { |
| 143 | stop_monitor_timer(); |
| 144 | if (unacked_frames_ > 0) { |
| 145 | start_retrans_timer(); |
| 146 | } |
| 147 | tx_state_ = TxState::XMIT; |
| 148 | } |
| 149 | } |
| 150 | |
| 151 | void retrans_timer_expires() { |
| 152 | if (tx_state_ == TxState::XMIT) { |
| 153 | send_rr_or_rnr(Poll::POLL); |
| 154 | // send rr or rnr(p=1) |
| 155 | retry_count_ = 1; |
| 156 | start_retrans_timer(); |
| 157 | tx_state_ = TxState::WAIT_F; |
| 158 | } |
| 159 | } |
| 160 | |
| 161 | void monitor_timer_expires() { |
| 162 | if (tx_state_ == TxState::WAIT_F && retry_count_less_than_max_transmit()) { |
| 163 | retry_count_++; |
| 164 | send_rr_or_rnr(Poll::POLL); |
| 165 | start_monitor_timer(); |
| 166 | } else if (tx_state_ == TxState::WAIT_F) { |
| 167 | CloseChannel(); |
| 168 | } |
| 169 | } |
| 170 | |
| 171 | void recv_i_frame(Final f, uint8_t tx_seq, uint8_t req_seq, SegmentationAndReassembly sar, |
| 172 | const packet::PacketView<true>& payload) { |
| 173 | if (rx_state_ == RxState::RECV) { |
| 174 | if (f == Final::NOT_SET && with_expected_tx_seq(tx_seq) && with_valid_req_seq(req_seq) && with_valid_f_bit(f) && |
| 175 | !local_busy()) { |
| 176 | increment_expected_tx_seq(); |
| 177 | pass_to_tx(req_seq, f); |
| 178 | data_indication(sar, payload); |
| 179 | send_ack(Final::NOT_SET); |
| 180 | } else if (f == Final::POLL_RESPONSE && with_expected_tx_seq(tx_seq) && with_valid_req_seq(req_seq) && |
| 181 | with_valid_f_bit(f) && !local_busy()) { |
| 182 | increment_expected_tx_seq(); |
| 183 | pass_to_tx(req_seq, f); |
| 184 | data_indication(sar, payload); |
| 185 | if (!rej_actioned_) { |
| 186 | retransmit_i_frames(req_seq); |
| 187 | send_pending_i_frames(); |
| 188 | } else { |
| 189 | rej_actioned_ = false; |
| 190 | } |
| 191 | send_ack(Final::NOT_SET); |
| 192 | } else if (with_duplicate_tx_seq(tx_seq) && with_valid_req_seq(req_seq) && with_valid_f_bit(f) && !local_busy()) { |
| 193 | pass_to_tx(req_seq, f); |
| 194 | } else if (with_unexpected_tx_seq(tx_seq) && with_valid_req_seq(req_seq) && with_valid_f_bit(f) && |
| 195 | !local_busy()) { |
| 196 | if constexpr (kSendSrej) { |
| 197 | // We don't support sending SREJ |
| 198 | } else { |
| 199 | pass_to_tx(req_seq, f); |
| 200 | send_rej(); |
| 201 | rx_state_ = RxState::REJ_SENT; |
| 202 | } |
| 203 | } else if (with_expected_tx_seq(tx_seq) && with_valid_req_seq(req_seq) && with_valid_f_bit(f) && local_busy()) { |
| 204 | pass_to_tx(req_seq, f); |
| 205 | store_or_ignore(); |
| 206 | } else if (with_valid_req_seq(req_seq) && not_with_expected_tx_seq(tx_seq) && with_valid_f_bit(f) && |
| 207 | local_busy()) { |
| 208 | pass_to_tx(req_seq, f); |
| 209 | } else if ((with_invalid_tx_seq(tx_seq) && controller_->local_tx_window_ > kMaxTxWin / 2) || |
| 210 | with_invalid_req_seq(req_seq)) { |
| 211 | CloseChannel(); |
| 212 | } else if (with_invalid_tx_seq(tx_seq) && controller_->local_tx_window_ <= kMaxTxWin / 2) { |
| 213 | // We decided to ignore |
| 214 | } |
| 215 | } else if (rx_state_ == RxState::REJ_SENT) { |
| 216 | if (f == Final::NOT_SET && with_expected_tx_seq(tx_seq) && with_valid_req_seq(req_seq) && with_valid_f_bit(f)) { |
| 217 | increment_expected_tx_seq(); |
| 218 | pass_to_tx(req_seq, f); |
| 219 | data_indication(sar, payload); |
| 220 | send_ack(Final::NOT_SET); |
| 221 | rx_state_ = RxState::RECV; |
| 222 | } else if (f == Final::POLL_RESPONSE && with_expected_tx_seq(tx_seq) && with_valid_req_seq(req_seq) && |
| 223 | with_valid_f_bit(f)) { |
| 224 | increment_expected_tx_seq(); |
| 225 | pass_to_tx(req_seq, f); |
| 226 | data_indication(sar, payload); |
| 227 | if (!rej_actioned_) { |
| 228 | retransmit_i_frames(req_seq); |
| 229 | send_pending_i_frames(); |
| 230 | } else { |
| 231 | rej_actioned_ = false; |
| 232 | } |
| 233 | send_ack(Final::NOT_SET); |
| 234 | rx_state_ = RxState::RECV; |
| 235 | } else if (with_unexpected_tx_seq(tx_seq) && with_valid_req_seq(req_seq) && with_valid_f_bit(f)) { |
| 236 | pass_to_tx(req_seq, f); |
| 237 | } |
| 238 | } else if (rx_state_ == RxState::SREJ_SENT) { |
| 239 | // SREJ NOT SUPPORTED |
| 240 | } |
| 241 | } |
| 242 | |
| 243 | void recv_rr(uint8_t req_seq, Poll p = Poll::NOT_SET, Final f = Final::NOT_SET) { |
| 244 | if (rx_state_ == RxState::RECV) { |
| 245 | if (p == Poll::NOT_SET && f == Final::NOT_SET && with_valid_req_seq(req_seq) && with_valid_f_bit(f)) { |
| 246 | pass_to_tx(req_seq, f); |
| 247 | if (remote_busy() && unacked_frames_ > 0) { |
| 248 | start_retrans_timer(); |
| 249 | } |
| 250 | remote_busy_ = false; |
| 251 | send_pending_i_frames(); |
| 252 | } else if (f == Final::POLL_RESPONSE && with_valid_req_seq(req_seq) && with_valid_f_bit(f)) { |
| 253 | remote_busy_ = false; |
| 254 | pass_to_tx(req_seq, f); |
| 255 | if (!rej_actioned_) { |
| 256 | retransmit_i_frames(req_seq, p); |
| 257 | } else { |
| 258 | rej_actioned_ = false; |
| 259 | } |
| 260 | send_pending_i_frames(); |
| 261 | } else if (p == Poll::POLL && with_valid_req_seq(req_seq) && with_valid_f_bit(f)) { |
| 262 | pass_to_tx(req_seq, f); |
| 263 | send_i_or_rr_or_rnr(Final::POLL_RESPONSE); |
| 264 | } else if (with_invalid_req_seq(req_seq)) { |
| 265 | CloseChannel(); |
| 266 | } |
| 267 | } else if (rx_state_ == RxState::REJ_SENT) { |
| 268 | if (f == Final::POLL_RESPONSE && with_valid_req_seq(req_seq) && with_valid_f_bit(f)) { |
| 269 | remote_busy_ = false; |
| 270 | pass_to_tx(req_seq, f); |
| 271 | if (!rej_actioned_) { |
| 272 | retransmit_i_frames(req_seq, p); |
| 273 | } else { |
| 274 | rej_actioned_ = false; |
| 275 | } |
| 276 | send_pending_i_frames(); |
| 277 | } else if (p == Poll::NOT_SET && f == Final::NOT_SET && with_valid_req_seq(req_seq) && with_valid_f_bit(f)) { |
| 278 | pass_to_tx(req_seq, f); |
| 279 | if (remote_busy() and unacked_frames_ > 0) { |
| 280 | start_retrans_timer(); |
| 281 | } |
| 282 | remote_busy_ = false; |
| 283 | send_ack(Final::NOT_SET); |
| 284 | } else if (p == Poll::POLL && with_valid_req_seq(req_seq) && with_valid_f_bit(f)) { |
| 285 | pass_to_tx(req_seq, f); |
| 286 | if (remote_busy() and unacked_frames_ > 0) { |
| 287 | start_retrans_timer(); |
| 288 | } |
| 289 | remote_busy_ = false; |
| 290 | send_rr(Final::POLL_RESPONSE); |
| 291 | } else if (with_invalid_req_seq(req_seq)) { |
| 292 | CloseChannel(); |
| 293 | } |
| 294 | } else if (rx_state_ == RxState::SREJ_SENT) { |
| 295 | // SREJ NOT SUPPORTED |
| 296 | } |
| 297 | } |
| 298 | |
| 299 | void recv_rej(uint8_t req_seq, Poll p = Poll::NOT_SET, Final f = Final::NOT_SET) { |
| 300 | if (rx_state_ == RxState::RECV) { |
| 301 | if (f == Final::NOT_SET && with_valid_req_seq_retrans(req_seq) && |
| 302 | retry_i_frames_less_than_max_transmit(req_seq) && with_valid_f_bit(f)) { |
| 303 | remote_busy_ = false; |
| 304 | pass_to_tx(req_seq, f); |
| 305 | retransmit_i_frames(req_seq, p); |
| 306 | send_pending_i_frames(); |
| 307 | if (p_bit_outstanding()) { |
| 308 | rej_actioned_ = true; |
| 309 | } |
| 310 | } else if (f == Final::POLL_RESPONSE && with_valid_req_seq_retrans(req_seq) && |
| 311 | retry_i_frames_less_than_max_transmit(req_seq) && with_valid_f_bit(f)) { |
| 312 | remote_busy_ = false; |
| 313 | pass_to_tx(req_seq, f); |
| 314 | if (!rej_actioned_) { |
| 315 | retransmit_i_frames(req_seq, p); |
| 316 | } else { |
| 317 | rej_actioned_ = false; |
| 318 | } |
| 319 | send_pending_i_frames(); |
| 320 | } else if (with_valid_req_seq_retrans(req_seq) && !retry_i_frames_less_than_max_transmit(req_seq)) { |
| 321 | CloseChannel(); |
| 322 | } else if (with_invalid_req_seq_retrans(req_seq)) { |
| 323 | CloseChannel(); |
| 324 | } |
| 325 | } else if (rx_state_ == RxState::REJ_SENT) { |
| 326 | if (f == Final::NOT_SET && with_valid_req_seq_retrans(req_seq) && |
| 327 | retry_i_frames_less_than_max_transmit(req_seq) && with_valid_f_bit(f)) { |
| 328 | remote_busy_ = false; |
| 329 | pass_to_tx(req_seq, f); |
| 330 | retransmit_i_frames(req_seq, p); |
| 331 | send_pending_i_frames(); |
| 332 | if (p_bit_outstanding()) { |
| 333 | rej_actioned_ = true; |
| 334 | } |
| 335 | } else if (f == Final::POLL_RESPONSE && with_valid_req_seq_retrans(req_seq) && |
| 336 | retry_i_frames_less_than_max_transmit(req_seq) && with_valid_f_bit(f)) { |
| 337 | remote_busy_ = false; |
| 338 | pass_to_tx(req_seq, f); |
| 339 | if (!rej_actioned_) { |
| 340 | retransmit_i_frames(req_seq, p); |
| 341 | } else { |
| 342 | rej_actioned_ = false; |
| 343 | } |
| 344 | send_pending_i_frames(); |
| 345 | } else if (with_valid_req_seq_retrans(req_seq) && !retry_i_frames_less_than_max_transmit(req_seq)) { |
| 346 | CloseChannel(); |
| 347 | } else if (with_invalid_req_seq_retrans(req_seq)) { |
| 348 | CloseChannel(); |
| 349 | } |
| 350 | } else if (rx_state_ == RxState::SREJ_SENT) { |
| 351 | // SREJ NOT SUPPORTED |
| 352 | } |
| 353 | } |
| 354 | |
| 355 | void recv_rnr(uint8_t req_seq, Poll p = Poll::NOT_SET, Final f = Final::NOT_SET) { |
| 356 | if (rx_state_ == RxState::RECV) { |
| 357 | if (p == Poll::NOT_SET && with_valid_req_seq(req_seq) && with_valid_f_bit(f)) { |
| 358 | remote_busy_ = true; |
| 359 | pass_to_tx(req_seq, f); |
| 360 | stop_retrans_timer(); |
| 361 | } else if (p == Poll::POLL && with_valid_req_seq(req_seq) && with_valid_f_bit(f)) { |
| 362 | remote_busy_ = true; |
| 363 | pass_to_tx(req_seq, f); |
| 364 | stop_retrans_timer(); |
| 365 | send_rr_or_rnr(Poll::NOT_SET, Final::POLL_RESPONSE); |
| 366 | } else if (with_invalid_req_seq_retrans(req_seq)) { |
| 367 | CloseChannel(); |
| 368 | } |
| 369 | } else if (rx_state_ == RxState::REJ_SENT) { |
| 370 | if (p == Poll::NOT_SET && with_valid_req_seq(req_seq) && with_valid_f_bit(f)) { |
| 371 | remote_busy_ = true; |
| 372 | pass_to_tx(req_seq, f); |
| 373 | send_rr(Final::POLL_RESPONSE); |
| 374 | } else if (p == Poll::POLL && with_valid_req_seq(req_seq) && with_valid_f_bit(f)) { |
| 375 | remote_busy_ = true; |
| 376 | pass_to_tx(req_seq, f); |
| 377 | send_rr(Final::NOT_SET); |
| 378 | } else if (with_invalid_req_seq_retrans(req_seq)) { |
| 379 | CloseChannel(); |
| 380 | } |
| 381 | } else if (rx_state_ == RxState::SREJ_SENT) { |
| 382 | // SREJ NOT SUPPORTED |
| 383 | } |
| 384 | } |
| 385 | |
| 386 | void recv_srej(uint8_t req_seq, Poll p = Poll::NOT_SET, Final f = Final::NOT_SET) { |
| 387 | if (rx_state_ == RxState::RECV) { |
| 388 | if (p == Poll::NOT_SET && f == Final::NOT_SET && with_valid_req_seq_retrans(req_seq) && |
| 389 | retry_i_frames_less_than_max_transmit(req_seq) && with_valid_f_bit(f)) { |
| 390 | remote_busy_ = false; |
| 391 | pass_to_tx_f_bit(f); |
| 392 | retransmit_requested_i_frame(req_seq, p); |
| 393 | if (p_bit_outstanding()) { |
| 394 | srej_actioned_ = true; |
| 395 | srej_save_req_seq_ = req_seq; |
| 396 | } |
| 397 | } else if (f == Final::POLL_RESPONSE && with_valid_req_seq_retrans(req_seq) && |
| 398 | retry_i_frames_less_than_max_transmit(req_seq) && with_valid_f_bit(f)) { |
| 399 | remote_busy_ = false; |
| 400 | pass_to_tx_f_bit(f); |
| 401 | if (srej_actioned_ && srej_save_req_seq_ == req_seq) { |
| 402 | srej_actioned_ = false; |
| 403 | } else { |
| 404 | retransmit_requested_i_frame(req_seq, p); |
| 405 | } |
| 406 | } else if (p == Poll::POLL && with_valid_req_seq_retrans(req_seq) && |
| 407 | retry_i_frames_less_than_max_transmit(req_seq) && with_valid_f_bit(f)) { |
| 408 | remote_busy_ = false; |
| 409 | pass_to_tx(req_seq, f); |
| 410 | retransmit_requested_i_frame(req_seq, p); |
| 411 | if (p_bit_outstanding()) { |
| 412 | srej_actioned_ = true; |
| 413 | srej_save_req_seq_ = req_seq; |
| 414 | } |
| 415 | } else if (with_valid_req_seq_retrans(req_seq) && !retry_i_frames_less_than_max_transmit(req_seq)) { |
| 416 | CloseChannel(); |
| 417 | } else if (with_invalid_req_seq_retrans(req_seq)) { |
| 418 | CloseChannel(); |
| 419 | } |
| 420 | } else if (rx_state_ == RxState::REJ_SENT) { |
| 421 | if (p == Poll::NOT_SET && f == Final::NOT_SET && with_valid_req_seq_retrans(req_seq) && |
| 422 | retry_i_frames_less_than_max_transmit(req_seq) && with_valid_f_bit(f)) { |
| 423 | remote_busy_ = false; |
| 424 | pass_to_tx_f_bit(f); |
| 425 | retransmit_requested_i_frame(req_seq, p); |
| 426 | if (p_bit_outstanding()) { |
| 427 | srej_actioned_ = true; |
| 428 | srej_save_req_seq_ = req_seq; |
| 429 | } |
| 430 | } else if (f == Final::POLL_RESPONSE && with_valid_req_seq_retrans(req_seq) && |
| 431 | retry_i_frames_less_than_max_transmit(req_seq) && with_valid_f_bit(f)) { |
| 432 | remote_busy_ = false; |
| 433 | pass_to_tx_f_bit(f); |
| 434 | if (srej_actioned_ && srej_save_req_seq_ == req_seq) { |
| 435 | srej_actioned_ = false; |
| 436 | } else { |
| 437 | retransmit_requested_i_frame(req_seq, p); |
| 438 | } |
| 439 | } else if (p == Poll::POLL && with_valid_req_seq_retrans(req_seq) && |
| 440 | retry_i_frames_less_than_max_transmit(req_seq) && with_valid_f_bit(f)) { |
| 441 | remote_busy_ = false; |
| 442 | pass_to_tx(req_seq, f); |
| 443 | retransmit_requested_i_frame(req_seq, p); |
| 444 | send_pending_i_frames(); |
| 445 | if (p_bit_outstanding()) { |
| 446 | srej_actioned_ = true; |
| 447 | srej_save_req_seq_ = req_seq; |
| 448 | } |
| 449 | } else if (with_valid_req_seq_retrans(req_seq) && !retry_i_frames_less_than_max_transmit(req_seq)) { |
| 450 | CloseChannel(); |
| 451 | } else if (with_invalid_req_seq_retrans(req_seq)) { |
| 452 | CloseChannel(); |
| 453 | } |
| 454 | } else if (rx_state_ == RxState::SREJ_SENT) { |
| 455 | // SREJ NOT SUPPORTED |
| 456 | } |
| 457 | } |
| 458 | |
| 459 | // Conditions (@see 8.6.5.5) |
| 460 | bool remote_busy() { |
| 461 | return remote_busy_; |
| 462 | } |
| 463 | |
| 464 | bool local_busy() { |
| 465 | return local_busy_; |
| 466 | } |
| 467 | |
| 468 | bool rem_window_not_full() { |
| 469 | return unacked_frames_ < controller_->remote_tx_window_; |
| 470 | } |
| 471 | |
| 472 | bool rem_window_full() { |
| 473 | return unacked_frames_ == controller_->remote_tx_window_; |
| 474 | } |
| 475 | |
| 476 | bool rnr_sent() { |
| 477 | return rnr_sent_; |
| 478 | } |
| 479 | |
| 480 | bool retry_i_frames_less_than_max_transmit(uint8_t req_seq) { |
| 481 | return retry_i_frames_[req_seq] < controller_->local_max_transmit_; |
| 482 | } |
| 483 | |
| 484 | bool retry_count_less_than_max_transmit() { |
| 485 | return retry_count_ < controller_->local_max_transmit_; |
| 486 | } |
| 487 | |
| 488 | bool with_expected_tx_seq(uint8_t tx_seq) { |
| 489 | return tx_seq == expected_tx_seq_; |
| 490 | } |
| 491 | |
| 492 | bool with_valid_req_seq(uint8_t req_seq) { |
Hansong Zhang | ba61624 | 2019-11-20 21:30:39 -0800 | [diff] [blame^] | 493 | return expected_ack_seq_ <= req_seq && req_seq <= next_tx_seq_; |
Hansong Zhang | 6594194 | 2019-11-16 23:48:30 -0800 | [diff] [blame] | 494 | } |
| 495 | |
| 496 | bool with_valid_req_seq_retrans(uint8_t req_seq) { |
Hansong Zhang | ba61624 | 2019-11-20 21:30:39 -0800 | [diff] [blame^] | 497 | return expected_ack_seq_ <= req_seq && req_seq <= next_tx_seq_; |
Hansong Zhang | 6594194 | 2019-11-16 23:48:30 -0800 | [diff] [blame] | 498 | } |
| 499 | |
| 500 | bool with_valid_f_bit(Final f) { |
| 501 | return f == Final::NOT_SET || tx_state_ == TxState::WAIT_F; |
| 502 | } |
| 503 | |
| 504 | bool with_unexpected_tx_seq(uint8_t tx_seq) { |
| 505 | return tx_seq > expected_tx_seq_ && tx_seq <= expected_tx_seq_ + controller_->local_tx_window_; |
| 506 | } |
| 507 | |
| 508 | bool with_duplicate_tx_seq(uint8_t tx_seq) { |
| 509 | return tx_seq < expected_tx_seq_ && tx_seq >= expected_tx_seq_ - controller_->local_tx_window_; |
| 510 | } |
| 511 | |
| 512 | bool with_invalid_tx_seq(uint8_t tx_seq) { |
| 513 | return tx_seq < expected_tx_seq_ - controller_->local_tx_window_ || |
| 514 | tx_seq > expected_tx_seq_ + controller_->local_tx_window_; |
| 515 | } |
| 516 | |
| 517 | bool with_invalid_req_seq(uint8_t req_seq) { |
| 518 | return req_seq < expected_ack_seq_ || req_seq >= next_tx_seq_; |
| 519 | } |
| 520 | |
| 521 | bool with_invalid_req_seq_retrans(uint8_t req_seq) { |
| 522 | return req_seq < expected_ack_seq_ || req_seq >= next_tx_seq_; |
| 523 | } |
| 524 | |
| 525 | bool not_with_expected_tx_seq(uint8_t tx_seq) { |
| 526 | return !with_invalid_tx_seq(tx_seq) && !with_expected_tx_seq(tx_seq); |
| 527 | } |
| 528 | |
| 529 | bool with_expected_tx_seq_srej() { |
| 530 | // We don't support sending SREJ |
| 531 | return false; |
| 532 | } |
| 533 | |
| 534 | bool send_req_is_true() { |
| 535 | // We don't support sending SREJ |
| 536 | return false; |
| 537 | } |
| 538 | |
| 539 | bool srej_list_is_one() { |
| 540 | // We don't support sending SREJ |
| 541 | return false; |
| 542 | } |
| 543 | |
| 544 | bool with_unexpected_tx_seq_srej() { |
| 545 | // We don't support sending SREJ |
| 546 | return false; |
| 547 | } |
| 548 | |
| 549 | bool with_duplicate_tx_seq_srej() { |
| 550 | // We don't support sending SREJ |
| 551 | return false; |
| 552 | } |
| 553 | |
| 554 | // Actions (@see 8.6.5.6) |
| 555 | |
Hansong Zhang | a631253 | 2019-11-19 14:01:36 -0800 | [diff] [blame] | 556 | void _send_i_frame(SegmentationAndReassembly sar, std::unique_ptr<CopyablePacketBuilder> segment, uint8_t req_seq, |
| 557 | uint8_t tx_seq, uint16_t sdu_size = 0, Final f = Final::NOT_SET) { |
| 558 | std::unique_ptr<EnhancedInformationFrameBuilder> builder; |
| 559 | if (sar == SegmentationAndReassembly::START) { |
| 560 | builder = EnhancedInformationStartFrameBuilder::Create(controller_->remote_cid_, tx_seq, f, req_seq, sdu_size, |
| 561 | std::move(segment)); |
| 562 | } else { |
| 563 | builder = EnhancedInformationFrameBuilder::Create(controller_->remote_cid_, tx_seq, f, req_seq, sar, |
| 564 | std::move(segment)); |
| 565 | } |
Hansong Zhang | 6594194 | 2019-11-16 23:48:30 -0800 | [diff] [blame] | 566 | controller_->send_pdu(std::move(builder)); |
| 567 | } |
| 568 | |
Hansong Zhang | a631253 | 2019-11-19 14:01:36 -0800 | [diff] [blame] | 569 | void send_data(SegmentationAndReassembly sar, uint16_t sdu_size, std::unique_ptr<packet::RawBuilder> segment, |
Hansong Zhang | 6594194 | 2019-11-16 23:48:30 -0800 | [diff] [blame] | 570 | Final f = Final::NOT_SET) { |
Hansong Zhang | a631253 | 2019-11-19 14:01:36 -0800 | [diff] [blame] | 571 | std::shared_ptr<packet::RawBuilder> shared_segment(segment.release()); |
Hansong Zhang | 6594194 | 2019-11-16 23:48:30 -0800 | [diff] [blame] | 572 | unacked_list_.emplace(std::piecewise_construct, std::forward_as_tuple(next_tx_seq_), |
Hansong Zhang | a631253 | 2019-11-19 14:01:36 -0800 | [diff] [blame] | 573 | std::forward_as_tuple(sar, sdu_size, shared_segment)); |
| 574 | |
| 575 | std::unique_ptr<CopyablePacketBuilder> copyable_packet_builder = |
| 576 | std::make_unique<CopyablePacketBuilder>(std::get<2>(unacked_list_.find(next_tx_seq_)->second)); |
| 577 | _send_i_frame(sar, std::move(copyable_packet_builder), buffer_seq_, next_tx_seq_, sdu_size, f); |
Hansong Zhang | 6594194 | 2019-11-16 23:48:30 -0800 | [diff] [blame] | 578 | // TODO hsz fix me |
| 579 | unacked_frames_++; |
| 580 | frames_sent_++; |
| 581 | retry_i_frames_[next_tx_seq_] = 1; |
| 582 | next_tx_seq_ = (next_tx_seq_ + 1) % kMaxTxWin; |
| 583 | start_retrans_timer(); |
| 584 | } |
| 585 | |
Hansong Zhang | a631253 | 2019-11-19 14:01:36 -0800 | [diff] [blame] | 586 | void pend_data(SegmentationAndReassembly sar, uint16_t sdu_size, std::unique_ptr<packet::RawBuilder> data) { |
| 587 | pending_frames_.emplace(std::make_tuple(sar, sdu_size, std::move(data))); |
Hansong Zhang | 6594194 | 2019-11-16 23:48:30 -0800 | [diff] [blame] | 588 | } |
| 589 | |
| 590 | void process_req_seq(uint8_t req_seq) { |
| 591 | for (int i = expected_ack_seq_; i < req_seq; i++) { |
| 592 | unacked_list_.erase(i); |
| 593 | retry_i_frames_[i] = 0; |
| 594 | } |
| 595 | unacked_frames_ -= ((req_seq - expected_ack_seq_) + kMaxTxWin) % kMaxTxWin; |
| 596 | if (unacked_frames_ == 0) { |
| 597 | stop_retrans_timer(); |
| 598 | } |
| 599 | } |
| 600 | |
| 601 | void _send_s_frame(SupervisoryFunction s, uint8_t req_seq, Poll p, Final f) { |
| 602 | auto builder = EnhancedSupervisoryFrameBuilder::Create(controller_->remote_cid_, s, p, f, req_seq); |
| 603 | controller_->send_pdu(std::move(builder)); |
| 604 | } |
| 605 | |
| 606 | void send_rr(Poll p) { |
| 607 | _send_s_frame(SupervisoryFunction::RECEIVER_READY, expected_tx_seq_, p, Final::NOT_SET); |
| 608 | } |
| 609 | |
| 610 | void send_rr(Final f) { |
| 611 | _send_s_frame(SupervisoryFunction::RECEIVER_READY, expected_tx_seq_, Poll::NOT_SET, f); |
| 612 | } |
| 613 | |
| 614 | void send_rnr(Poll p) { |
| 615 | _send_s_frame(SupervisoryFunction::RECEIVER_NOT_READY, expected_tx_seq_, p, Final::NOT_SET); |
| 616 | } |
| 617 | |
| 618 | void send_rnr(Final f) { |
| 619 | _send_s_frame(SupervisoryFunction::RECEIVER_NOT_READY, expected_tx_seq_, Poll::NOT_SET, f); |
| 620 | } |
| 621 | |
| 622 | void send_rej(Poll p = Poll::NOT_SET, Final f = Final::NOT_SET) { |
| 623 | _send_s_frame(SupervisoryFunction::REJECT, expected_tx_seq_, p, f); |
| 624 | } |
| 625 | |
| 626 | void send_rr_or_rnr(Poll p = Poll::NOT_SET, Final f = Final::NOT_SET) { |
| 627 | if (local_busy()) { |
| 628 | _send_s_frame(SupervisoryFunction::RECEIVER_NOT_READY, buffer_seq_, p, f); |
| 629 | } else { |
| 630 | _send_s_frame(SupervisoryFunction::RECEIVER_READY, buffer_seq_, p, f); |
| 631 | } |
| 632 | } |
| 633 | |
| 634 | void send_i_or_rr_or_rnr(Final f = Final::POLL_RESPONSE) { |
| 635 | auto frames_sent = 0; |
| 636 | if (local_busy()) { |
| 637 | send_rnr(Final::POLL_RESPONSE); |
| 638 | } |
| 639 | if (remote_busy() && unacked_frames_ > 0) { |
| 640 | start_retrans_timer(); |
| 641 | } |
| 642 | remote_busy_ = false; |
| 643 | send_pending_i_frames(f); // TODO: Only first has f = 1, other f = 0. Also increase frames_sent |
| 644 | if (!local_busy() && frames_sent == 0) { |
| 645 | send_rr(Final::POLL_RESPONSE); |
| 646 | } |
| 647 | } |
| 648 | |
| 649 | void send_srej() { |
| 650 | // Sending SREJ is not supported |
| 651 | } |
| 652 | |
| 653 | void start_retrans_timer() { |
| 654 | retrans_timer_.Schedule(common::BindOnce(&impl::retrans_timer_expires, common::Unretained(this)), |
| 655 | std::chrono::milliseconds(controller_->local_retransmit_timeout_ms_)); |
| 656 | } |
| 657 | |
| 658 | void start_monitor_timer() { |
| 659 | monitor_timer_.Schedule(common::BindOnce(&impl::monitor_timer_expires, common::Unretained(this)), |
| 660 | std::chrono::milliseconds(controller_->local_monitor_timeout_ms_)); |
| 661 | } |
| 662 | |
| 663 | void pass_to_tx(uint8_t req_seq, Final f) { |
| 664 | recv_req_seq_and_f_bit(req_seq, f); |
| 665 | } |
| 666 | |
| 667 | void pass_to_tx_f_bit(Final f) { |
| 668 | recv_f_bit(f); |
| 669 | } |
| 670 | |
| 671 | void data_indication(SegmentationAndReassembly sar, const packet::PacketView<true>& segment) { |
| 672 | controller_->stage_for_reassembly(sar, segment); |
| 673 | buffer_seq_ = (buffer_seq_ + 1) % kMaxTxWin; |
| 674 | } |
| 675 | |
| 676 | void increment_expected_tx_seq() { |
| 677 | expected_tx_seq_ = (expected_tx_seq_ + 1) % kMaxTxWin; |
| 678 | } |
| 679 | |
| 680 | void stop_retrans_timer() { |
| 681 | retrans_timer_.Cancel(); |
| 682 | } |
| 683 | |
| 684 | void stop_monitor_timer() { |
| 685 | monitor_timer_.Cancel(); |
| 686 | } |
| 687 | |
| 688 | void send_ack(Final f = Final::NOT_SET) { |
| 689 | if (local_busy()) { |
| 690 | send_rnr(f); |
| 691 | } else if (!remote_busy() && /* pending i frames exist */ rem_window_not_full()) { |
| 692 | send_pending_i_frames(f); |
| 693 | } else { |
| 694 | send_rr(f); |
| 695 | } |
| 696 | } |
| 697 | |
| 698 | void init_srej() { |
| 699 | // We don't support sending SREJ |
| 700 | } |
| 701 | |
| 702 | void save_i_frame_srej() { |
| 703 | // We don't support sending SREJ |
| 704 | } |
| 705 | |
| 706 | void store_or_ignore() { |
| 707 | // We choose to ignore. We don't support local busy so far. |
| 708 | } |
| 709 | |
| 710 | bool p_bit_outstanding() { |
| 711 | return tx_state_ == TxState::WAIT_F; |
| 712 | } |
| 713 | |
| 714 | void retransmit_i_frames(uint8_t req_seq, Poll p = Poll::NOT_SET) { |
| 715 | uint8_t i = req_seq; |
| 716 | Final f = (p == Poll::NOT_SET ? Final::NOT_SET : Final::POLL_RESPONSE); |
| 717 | while (unacked_list_.find(i) == unacked_list_.end()) { |
Hansong Zhang | a631253 | 2019-11-19 14:01:36 -0800 | [diff] [blame] | 718 | std::unique_ptr<CopyablePacketBuilder> copyable_packet_builder = |
| 719 | std::make_unique<CopyablePacketBuilder>(std::get<2>(unacked_list_.find(i)->second)); |
| 720 | _send_i_frame(std::get<0>(unacked_list_.find(i)->second), std::move(copyable_packet_builder), buffer_seq_, i, |
| 721 | std::get<1>(unacked_list_.find(i)->second), f); |
Hansong Zhang | 6594194 | 2019-11-16 23:48:30 -0800 | [diff] [blame] | 722 | retry_i_frames_[i]++; |
| 723 | if (retry_i_frames_[i] == controller_->local_max_transmit_) { |
| 724 | CloseChannel(); |
| 725 | } |
| 726 | frames_sent_++; |
| 727 | f = Final::NOT_SET; |
| 728 | } |
| 729 | start_retrans_timer(); |
| 730 | } |
| 731 | |
| 732 | void retransmit_requested_i_frame(uint8_t req_seq, Poll p) { |
| 733 | Final f = p == Poll::POLL ? Final::POLL_RESPONSE : Final::NOT_SET; |
| 734 | if (unacked_list_.find(req_seq) == unacked_list_.end()) { |
| 735 | LOG_ERROR("Received invalid SREJ"); |
| 736 | return; |
| 737 | } |
Hansong Zhang | a631253 | 2019-11-19 14:01:36 -0800 | [diff] [blame] | 738 | std::unique_ptr<CopyablePacketBuilder> copyable_packet_builder = |
| 739 | std::make_unique<CopyablePacketBuilder>(std::get<2>(unacked_list_.find(req_seq)->second)); |
| 740 | _send_i_frame(std::get<0>(unacked_list_.find(req_seq)->second), std::move(copyable_packet_builder), buffer_seq_, |
| 741 | req_seq, std::get<1>(unacked_list_.find(req_seq)->second), f); |
Hansong Zhang | 6594194 | 2019-11-16 23:48:30 -0800 | [diff] [blame] | 742 | retry_i_frames_[req_seq]++; |
| 743 | start_retrans_timer(); |
| 744 | } |
| 745 | |
| 746 | void send_pending_i_frames(Final f = Final::NOT_SET) { |
| 747 | if (p_bit_outstanding()) { |
| 748 | return; |
| 749 | } |
| 750 | while (rem_window_not_full() && !pending_frames_.empty()) { |
| 751 | auto& frame = pending_frames_.front(); |
Hansong Zhang | a631253 | 2019-11-19 14:01:36 -0800 | [diff] [blame] | 752 | send_data(std::get<0>(frame), std::get<1>(frame), std::move(std::get<2>(frame)), f); |
Hansong Zhang | 6594194 | 2019-11-16 23:48:30 -0800 | [diff] [blame] | 753 | pending_frames_.pop(); |
| 754 | f = Final::NOT_SET; |
| 755 | } |
| 756 | } |
| 757 | |
  // Requests teardown of this ERTM channel (invoked when a frame exhausts
  // local_max_transmit_ retransmissions). Currently a stub.
  void CloseChannel() {
    // TODO: Needs a reference to signaller
  }
| 761 | |
  // ERTM action "Pop-SREJ-list": intentionally a no-op because this
  // implementation never sends SREJ frames, so no SREJ list exists.
  void pop_srej_list() {
    // We don't support sending SREJ
  }
| 765 | |
  // ERTM action "Data-Indication(SREJ)": intentionally a no-op because this
  // implementation never sends SREJ frames.
  void data_indication_srej() {
    // We don't support sending SREJ
  }
| 769 | }; |
| 770 | |
| 771 | // Segmentation is handled here |
Hansong Zhang | b096076 | 2019-11-14 17:57:10 -0800 | [diff] [blame] | 772 | void ErtmController::OnSdu(std::unique_ptr<packet::BasePacketBuilder> sdu) { |
Hansong Zhang | 6594194 | 2019-11-16 23:48:30 -0800 | [diff] [blame] | 773 | // TODO: Optimize the calculation. We don't need to count for SDU length in CONTINUATION or END packets. We don't need |
| 774 | // to FCS when disabled. |
Hansong Zhang | a631253 | 2019-11-19 14:01:36 -0800 | [diff] [blame] | 775 | size_t size_each_packet = |
Hansong Zhang | 6594194 | 2019-11-16 23:48:30 -0800 | [diff] [blame] | 776 | (remote_mps_ - 4 /* basic L2CAP header */ - 2 /* SDU length */ - 2 /* Extended control */ - 2 /* FCS */); |
Hansong Zhang | a631253 | 2019-11-19 14:01:36 -0800 | [diff] [blame] | 777 | auto sdu_size = sdu->size(); |
Hansong Zhang | 6594194 | 2019-11-16 23:48:30 -0800 | [diff] [blame] | 778 | std::vector<std::unique_ptr<packet::RawBuilder>> segments; |
| 779 | packet::FragmentingInserter fragmenting_inserter(size_each_packet, std::back_insert_iterator(segments)); |
| 780 | sdu->Serialize(fragmenting_inserter); |
Hansong Zhang | a631253 | 2019-11-19 14:01:36 -0800 | [diff] [blame] | 781 | fragmenting_inserter.finalize(); |
Hansong Zhang | 6594194 | 2019-11-16 23:48:30 -0800 | [diff] [blame] | 782 | if (segments.size() == 1) { |
Hansong Zhang | a631253 | 2019-11-19 14:01:36 -0800 | [diff] [blame] | 783 | pimpl_->data_request(SegmentationAndReassembly::UNSEGMENTED, std::move(segments[0])); |
Hansong Zhang | 6594194 | 2019-11-16 23:48:30 -0800 | [diff] [blame] | 784 | return; |
| 785 | } |
Hansong Zhang | a631253 | 2019-11-19 14:01:36 -0800 | [diff] [blame] | 786 | pimpl_->data_request(SegmentationAndReassembly::START, std::move(segments[0]), sdu_size); |
| 787 | for (auto i = 1; i < segments.size() - 1; i++) { |
| 788 | pimpl_->data_request(SegmentationAndReassembly::CONTINUATION, std::move(segments[i])); |
Hansong Zhang | 6594194 | 2019-11-16 23:48:30 -0800 | [diff] [blame] | 789 | } |
Hansong Zhang | a631253 | 2019-11-19 14:01:36 -0800 | [diff] [blame] | 790 | pimpl_->data_request(SegmentationAndReassembly::END, std::move(segments.back())); |
Hansong Zhang | b096076 | 2019-11-14 17:57:10 -0800 | [diff] [blame] | 791 | } |
| 792 | |
| 793 | void ErtmController::OnPdu(BasicFrameView pdu) { |
Hansong Zhang | 6594194 | 2019-11-16 23:48:30 -0800 | [diff] [blame] | 794 | auto standard_frame_view = StandardFrameView::Create(pdu); |
| 795 | if (!standard_frame_view.IsValid()) { |
| 796 | LOG_WARN("Received invalid frame"); |
| 797 | return; |
| 798 | } |
| 799 | auto type = standard_frame_view.GetFrameType(); |
| 800 | if (type == FrameType::I_FRAME) { |
| 801 | auto i_frame_view = EnhancedInformationFrameView::Create(standard_frame_view); |
| 802 | if (!i_frame_view.IsValid()) { |
| 803 | LOG_WARN("Received invalid frame"); |
| 804 | return; |
| 805 | } |
| 806 | pimpl_->recv_i_frame(i_frame_view.GetF(), i_frame_view.GetTxSeq(), i_frame_view.GetReqSeq(), i_frame_view.GetSar(), |
| 807 | i_frame_view.GetPayload()); |
| 808 | } else if (type == FrameType::S_FRAME) { |
| 809 | auto s_frame_view = EnhancedSupervisoryFrameView::Create(standard_frame_view); |
| 810 | if (!s_frame_view.IsValid()) { |
| 811 | LOG_WARN("Received invalid frame"); |
| 812 | return; |
| 813 | } |
| 814 | auto req_seq = s_frame_view.GetReqSeq(); |
| 815 | auto f = s_frame_view.GetF(); |
| 816 | auto p = s_frame_view.GetP(); |
| 817 | switch (s_frame_view.GetS()) { |
| 818 | case SupervisoryFunction::RECEIVER_READY: |
| 819 | pimpl_->recv_rr(req_seq, p, f); |
| 820 | break; |
| 821 | case SupervisoryFunction::RECEIVER_NOT_READY: |
| 822 | pimpl_->recv_rnr(req_seq, p, f); |
| 823 | break; |
| 824 | case SupervisoryFunction::REJECT: |
| 825 | pimpl_->recv_rej(req_seq, p, f); |
| 826 | break; |
| 827 | case SupervisoryFunction::SELECT_REJECT: |
| 828 | pimpl_->recv_srej(req_seq, p, f); |
| 829 | break; |
| 830 | } |
| 831 | } else { |
| 832 | LOG_WARN("Received invalid frame"); |
| 833 | } |
Hansong Zhang | b096076 | 2019-11-14 17:57:10 -0800 | [diff] [blame] | 834 | } |
| 835 | |
| 836 | std::unique_ptr<BasicFrameBuilder> ErtmController::GetNextPacket() { |
| 837 | auto next = std::move(pdu_queue_.front()); |
| 838 | pdu_queue_.pop(); |
| 839 | return next; |
| 840 | } |
| 841 | |
Hansong Zhang | 6594194 | 2019-11-16 23:48:30 -0800 | [diff] [blame] | 842 | void ErtmController::stage_for_reassembly(SegmentationAndReassembly sar, |
| 843 | const packet::PacketView<kLittleEndian>& payload) { |
| 844 | switch (sar) { |
| 845 | case SegmentationAndReassembly::UNSEGMENTED: |
| 846 | enqueue_buffer_.Enqueue(std::make_unique<packet::PacketView<kLittleEndian>>(payload), handler_); |
| 847 | break; |
| 848 | case SegmentationAndReassembly::START: |
| 849 | if (sar_state_ != SegmentationAndReassembly::END) { |
| 850 | LOG_WARN("Received invalid SAR"); |
| 851 | close_channel(); |
| 852 | return; |
| 853 | } |
| 854 | sar_state_ = SegmentationAndReassembly::START; |
| 855 | reassembly_stage_ = payload; |
| 856 | break; |
| 857 | case SegmentationAndReassembly::CONTINUATION: |
| 858 | if (sar_state_ == SegmentationAndReassembly::END) { |
| 859 | LOG_WARN("Received invalid SAR"); |
| 860 | close_channel(); |
| 861 | return; |
| 862 | } |
| 863 | reassembly_stage_.AppendPacketView(payload); |
| 864 | break; |
| 865 | case SegmentationAndReassembly::END: |
| 866 | if (sar_state_ == SegmentationAndReassembly::END) { |
| 867 | LOG_WARN("Received invalid SAR"); |
| 868 | close_channel(); |
| 869 | return; |
| 870 | } |
| 871 | reassembly_stage_.AppendPacketView(payload); |
| 872 | enqueue_buffer_.Enqueue(std::make_unique<packet::PacketView<kLittleEndian>>(reassembly_stage_), handler_); |
| 873 | sar_state_ = SegmentationAndReassembly::END; |
| 874 | break; |
| 875 | } |
| 876 | } |
| 877 | |
| 878 | void ErtmController::send_pdu(std::unique_ptr<BasicFrameBuilder> pdu) { |
| 879 | pdu_queue_.emplace(std::move(pdu)); |
| 880 | scheduler_->OnPacketsReady(cid_, 1); |
| 881 | } |
| 882 | |
// Closes the channel after a protocol error (e.g. an invalid SAR sequence).
// Currently a stub.
void ErtmController::close_channel() {
  // TODO: Get a reference to signalling manager
}
| 886 | |
// Forwards to the wrapped builder's serialized size, letting the shared
// builder be queued for (re)transmission without being consumed.
size_t ErtmController::CopyablePacketBuilder::size() const {
  return builder_->size();
}
| 890 | |
// Forwards serialization to the wrapped builder; the wrapped builder is not
// consumed, so the same payload can be serialized again on retransmission.
void ErtmController::CopyablePacketBuilder::Serialize(BitInserter& it) const {
  builder_->Serialize(it);
}
| 894 | |
Hansong Zhang | b096076 | 2019-11-14 17:57:10 -0800 | [diff] [blame] | 895 | } // namespace internal |
| 896 | } // namespace l2cap |
| 897 | } // namespace bluetooth |