blob: ddf7ebbaaef319dd54ca348e41a7dce5961240ea [file] [log] [blame]
Johan Hedberg7dec65c2012-07-16 16:12:02 +03001/*
2 *
3 * Bluetooth HCI Three-wire UART driver
4 *
5 * Copyright (C) 2012 Intel Corporation
6 *
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24#include <linux/kernel.h>
25#include <linux/errno.h>
26#include <linux/skbuff.h>
27
28#include <net/bluetooth/bluetooth.h>
29#include <net/bluetooth/hci_core.h>
30
31#include "hci_uart.h"
32
Johan Hedberg3f27e952012-07-16 16:12:04 +030033#define H5_TXWINSIZE 4
34
35#define H5_ACK_TIMEOUT msecs_to_jiffies(250)
36
Johan Hedbergbc1f35b2012-07-16 16:12:05 +030037/*
38 * Maximum Three-wire packet:
39 * 4 byte header + max value for 12-bit length + 2 bytes for CRC
40 */
41#define H5_MAX_LEN (4 + 0xfff + 2)
42
43#define SLIP_DELIMITER 0xc0
44#define SLIP_ESC 0xdb
45#define SLIP_ESC_DELIM 0xdc
46#define SLIP_ESC_ESC 0xdd
47
Johan Hedberg7d664fb2012-07-16 16:12:03 +030048struct h5 {
Johan Hedbergbc1f35b2012-07-16 16:12:05 +030049 struct sk_buff_head unack; /* Unack'ed packets queue */
50 struct sk_buff_head rel; /* Reliable packets queue */
51 struct sk_buff_head unrel; /* Unreliable packets queue */
Johan Hedberg7d664fb2012-07-16 16:12:03 +030052
Johan Hedbergbc1f35b2012-07-16 16:12:05 +030053 struct sk_buff *rx_skb; /* Receive buffer */
54 size_t rx_pending; /* Expecting more bytes */
55 bool rx_esc; /* SLIP escape mode */
Johan Hedberg7d664fb2012-07-16 16:12:03 +030056
Johan Hedbergbc1f35b2012-07-16 16:12:05 +030057 int (*rx_func) (struct hci_uart *hu, u8 c);
Johan Hedberg3f27e952012-07-16 16:12:04 +030058
Johan Hedbergbc1f35b2012-07-16 16:12:05 +030059 struct timer_list timer; /* Retransmission timer */
Johan Hedberg7d664fb2012-07-16 16:12:03 +030060
Johan Hedbergbc1f35b2012-07-16 16:12:05 +030061 bool txack_req;
62
63 u8 msgq_txseq;
Johan Hedberg7d664fb2012-07-16 16:12:03 +030064};
65
Johan Hedbergbc1f35b2012-07-16 16:12:05 +030066static void h5_reset_rx(struct h5 *h5);
67
Johan Hedberg3f27e952012-07-16 16:12:04 +030068static void h5_timed_event(unsigned long arg)
69{
70 struct hci_uart *hu = (struct hci_uart *) arg;
71 struct h5 *h5 = hu->priv;
72 struct sk_buff *skb;
73 unsigned long flags;
74
75 BT_DBG("hu %p retransmitting %u pkts", hu, h5->unack.qlen);
76
77 spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);
78
79 while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) {
80 h5->msgq_txseq = (h5->msgq_txseq - 1) & 0x07;
81 skb_queue_head(&h5->rel, skb);
82 }
83
84 spin_unlock_irqrestore(&h5->unack.lock, flags);
85
86 hci_uart_tx_wakeup(hu);
87}
88
Johan Hedberg7dec65c2012-07-16 16:12:02 +030089static int h5_open(struct hci_uart *hu)
90{
Johan Hedberg7d664fb2012-07-16 16:12:03 +030091 struct h5 *h5;
92
93 BT_DBG("hu %p", hu);
94
95 h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
96 if (!h5)
97 return -ENOMEM;
98
99 hu->priv = h5;
100
101 skb_queue_head_init(&h5->unack);
102 skb_queue_head_init(&h5->rel);
103 skb_queue_head_init(&h5->unrel);
104
Johan Hedbergbc1f35b2012-07-16 16:12:05 +0300105 h5_reset_rx(h5);
106
Johan Hedberg3f27e952012-07-16 16:12:04 +0300107 init_timer(&h5->timer);
108 h5->timer.function = h5_timed_event;
109 h5->timer.data = (unsigned long) hu;
110
Johan Hedberg7d664fb2012-07-16 16:12:03 +0300111 return 0;
Johan Hedberg7dec65c2012-07-16 16:12:02 +0300112}
113
114static int h5_close(struct hci_uart *hu)
115{
Johan Hedberg7d664fb2012-07-16 16:12:03 +0300116 struct h5 *h5 = hu->priv;
117
118 skb_queue_purge(&h5->unack);
119 skb_queue_purge(&h5->rel);
120 skb_queue_purge(&h5->unrel);
121
Johan Hedberg3f27e952012-07-16 16:12:04 +0300122 del_timer(&h5->timer);
123
Johan Hedberg7d664fb2012-07-16 16:12:03 +0300124 kfree(h5);
125
126 return 0;
Johan Hedberg7dec65c2012-07-16 16:12:02 +0300127}
128
Johan Hedbergbc1f35b2012-07-16 16:12:05 +0300129static void h5_handle_internal_rx(struct hci_uart *hu)
130{
131 BT_DBG("%s", hu->hdev->name);
132}
133
134static void h5_complete_rx_pkt(struct hci_uart *hu)
135{
136 struct h5 *h5 = hu->priv;
137 u8 pkt_type;
138
139 BT_DBG("%s", hu->hdev->name);
140
141 pkt_type = h5->rx_skb->data[1] & 0x0f;
142
143 switch (pkt_type) {
144 case HCI_EVENT_PKT:
145 case HCI_ACLDATA_PKT:
146 case HCI_SCODATA_PKT:
147 bt_cb(h5->rx_skb)->pkt_type = pkt_type;
148
149 /* Remove Three-wire header */
150 skb_pull(h5->rx_skb, 4);
151
152 hci_recv_frame(h5->rx_skb);
153 h5->rx_skb = NULL;
154
155 break;
156
157 default:
158 h5_handle_internal_rx(hu);
159 break;
160 }
161
162 h5_reset_rx(h5);
163}
164
165static int h5_rx_crc(struct hci_uart *hu, unsigned char c)
166{
167 struct h5 *h5 = hu->priv;
168
169 BT_DBG("%s 0x%02hhx", hu->hdev->name, c);
170
171 h5_complete_rx_pkt(hu);
172 h5_reset_rx(h5);
173
174 return 0;
175}
176
177static int h5_rx_payload(struct hci_uart *hu, unsigned char c)
178{
179 struct h5 *h5 = hu->priv;
180 const unsigned char *hdr = h5->rx_skb->data;
181
182 BT_DBG("%s 0x%02hhx", hu->hdev->name, c);
183
184 if ((hdr[0] >> 4) & 0x01) {
185 h5->rx_func = h5_rx_crc;
186 h5->rx_pending = 2;
187 } else {
188 h5_complete_rx_pkt(hu);
189 h5_reset_rx(h5);
190 }
191
192 return 0;
193}
194
195static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
196{
197 struct h5 *h5 = hu->priv;
198 const unsigned char *hdr = h5->rx_skb->data;
199
200 BT_DBG("%s 0x%02hhx", hu->hdev->name, c);
201
202 if (((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) != 0xff) {
203 BT_ERR("Invalid header checksum");
204 h5_reset_rx(h5);
205 return 0;
206 }
207
208 h5->rx_func = h5_rx_payload;
209 h5->rx_pending = ((hdr[1] >> 4) & 0xff) + (hdr[2] << 4);
210
211 return 0;
212}
213
214static int h5_rx_pkt_start(struct hci_uart *hu, unsigned char c)
215{
216 struct h5 *h5 = hu->priv;
217
218 BT_DBG("%s 0x%02hhx", hu->hdev->name, c);
219
220 if (c == SLIP_DELIMITER)
221 return 1;
222
223 h5->rx_func = h5_rx_3wire_hdr;
224 h5->rx_pending = 4;
225
226 h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC);
227 if (!h5->rx_skb) {
228 BT_ERR("Can't allocate mem for new packet");
229 h5_reset_rx(h5);
230 return -ENOMEM;
231 }
232
233 h5->rx_skb->dev = (void *) hu->hdev;
234
235 return 0;
236}
237
238static int h5_rx_delimiter(struct hci_uart *hu, unsigned char c)
239{
240 struct h5 *h5 = hu->priv;
241
242 BT_DBG("%s 0x%02hhx", hu->hdev->name, c);
243
244 if (c == SLIP_DELIMITER)
245 h5->rx_func = h5_rx_pkt_start;
246
247 return 1;
248}
249
250static void h5_unslip_one_byte(struct h5 *h5, unsigned char c)
251{
252 const u8 delim = SLIP_DELIMITER, esc = SLIP_ESC;
253 const u8 *byte = &c;
254
255 if (!h5->rx_esc && c == SLIP_ESC) {
256 h5->rx_esc = true;
257 return;
258 }
259
260 if (h5->rx_esc) {
261 switch (c) {
262 case SLIP_ESC_DELIM:
263 byte = &delim;
264 break;
265 case SLIP_ESC_ESC:
266 byte = &esc;
267 break;
268 default:
269 BT_ERR("Invalid esc byte 0x%02hhx", c);
270 h5_reset_rx(h5);
271 return;
272 }
273
274 h5->rx_esc = false;
275 }
276
277 memcpy(skb_put(h5->rx_skb, 1), byte, 1);
278 h5->rx_pending--;
279
280 BT_DBG("unsliped 0x%02hhx", *byte);
281}
282
283static void h5_reset_rx(struct h5 *h5)
284{
285 if (h5->rx_skb) {
286 kfree_skb(h5->rx_skb);
287 h5->rx_skb = NULL;
288 }
289
290 h5->rx_func = h5_rx_delimiter;
291 h5->rx_pending = 0;
292 h5->rx_esc = false;
293}
294
Johan Hedberg7dec65c2012-07-16 16:12:02 +0300295static int h5_recv(struct hci_uart *hu, void *data, int count)
296{
Johan Hedbergbc1f35b2012-07-16 16:12:05 +0300297 struct h5 *h5 = hu->priv;
298 unsigned char *ptr = data;
299
300 BT_DBG("%s count %d", hu->hdev->name, count);
301
302 while (count > 0) {
303 int processed;
304
305 if (h5->rx_pending > 0) {
306 if (*ptr == SLIP_DELIMITER) {
307 BT_ERR("Too short H5 packet");
308 h5_reset_rx(h5);
309 continue;
310 }
311
312 h5_unslip_one_byte(h5, *ptr);
313
314 ptr++; count--;
315 continue;
316 }
317
318 processed = h5->rx_func(hu, *ptr);
319 if (processed < 0)
320 return processed;
321
322 ptr += processed;
323 count -= processed;
324 }
325
326 return 0;
Johan Hedberg7dec65c2012-07-16 16:12:02 +0300327}
328
329static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
330{
Johan Hedberg7d664fb2012-07-16 16:12:03 +0300331 struct h5 *h5 = hu->priv;
332
333 if (skb->len > 0xfff) {
334 BT_ERR("Packet too long (%u bytes)", skb->len);
335 kfree_skb(skb);
336 return 0;
337 }
338
339 switch (bt_cb(skb)->pkt_type) {
340 case HCI_ACLDATA_PKT:
341 case HCI_COMMAND_PKT:
342 skb_queue_tail(&h5->rel, skb);
343 break;
344
345 case HCI_SCODATA_PKT:
346 skb_queue_tail(&h5->unrel, skb);
347 break;
348
349 default:
350 BT_ERR("Unknown packet type %u", bt_cb(skb)->pkt_type);
351 kfree_skb(skb);
352 break;
353 }
354
355 return 0;
356}
357
/*
 * Build the on-the-wire (SLIP-framed) version of @skb.
 * Stub: always returns NULL, which h5_dequeue() treats as an
 * allocation failure and requeues the original skb. Clearing
 * txack_req suggests outgoing packets will eventually carry the ack
 * number -- TODO confirm when the real implementation lands.
 */
static struct sk_buff *h5_prepare_pkt(struct h5 *h5, struct sk_buff *skb)
{
	h5->txack_req = false;
	return NULL;
}
363
/*
 * Build a pure ack frame (no payload).
 * Stub: clears the pending-ack flag and returns NULL, so h5_dequeue()
 * currently ends up sending nothing for an ack request.
 */
static struct sk_buff *h5_prepare_ack(struct h5 *h5)
{
	h5->txack_req = false;
	return NULL;
}
369
/*
 * Called by the hci_uart core to fetch the next frame to transmit.
 *
 * Priority order:
 *   1. Unreliable packets (SCO, per h5_enqueue()) -- sent outside the
 *      sliding window.
 *   2. Reliable packets, but only while fewer than H5_TXWINSIZE frames
 *      are awaiting acknowledgement; the original skb is kept on the
 *      unack queue and the retransmission timer is (re)armed.
 *   3. A pure ack frame, if one has been requested.
 *
 * Returns the prepared skb, or NULL if nothing can be sent right now.
 */
static struct sk_buff *h5_dequeue(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	unsigned long flags;
	struct sk_buff *skb, *nskb;

	if ((skb = skb_dequeue(&h5->unrel)) != NULL) {
		nskb = h5_prepare_pkt(h5, skb);
		if (nskb) {
			/* Wire copy built; the original is no longer needed */
			kfree_skb(skb);
			return nskb;
		}

		skb_queue_head(&h5->unrel, skb);
		BT_ERR("Could not dequeue pkt because alloc_skb failed");
	}

	/* SINGLE_DEPTH_NESTING: sk_buff_head locks share a lock class and
	 * the unack lock is held while other queue locks may be taken
	 * (see h5_timed_event()) -- presumably to keep lockdep quiet. */
	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	/* Window full: wait for acks before sending more reliable data */
	if (h5->unack.qlen >= H5_TXWINSIZE)
		goto unlock;

	if ((skb = skb_dequeue(&h5->rel)) != NULL) {
		nskb = h5_prepare_pkt(h5, skb);

		if (nskb) {
			/* Keep the original until it is acknowledged */
			__skb_queue_tail(&h5->unack, skb);
			mod_timer(&h5->timer, jiffies + H5_ACK_TIMEOUT);
			spin_unlock_irqrestore(&h5->unack.lock, flags);
			return nskb;
		}

		skb_queue_head(&h5->rel, skb);
		BT_ERR("Could not dequeue pkt because alloc_skb failed");
	}

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);

	if (h5->txack_req)
		return h5_prepare_ack(h5);

	return NULL;
}
414
/*
 * hci_uart flush callback. No transport-specific flushing is done
 * here; it only logs and reports success.
 */
static int h5_flush(struct hci_uart *hu)
{
	BT_DBG("hu %p", hu);
	return 0;
}
420
/* Three-wire protocol operations registered with the hci_uart core */
static struct hci_uart_proto h5p = {
	.id		= HCI_UART_3WIRE,
	.open		= h5_open,
	.close		= h5_close,
	.recv		= h5_recv,
	.enqueue	= h5_enqueue,
	.dequeue	= h5_dequeue,
	.flush		= h5_flush,
};
430
431int __init h5_init(void)
432{
433 int err = hci_uart_register_proto(&h5p);
434
435 if (!err)
436 BT_INFO("HCI Three-wire UART (H5) protocol initialized");
437 else
438 BT_ERR("HCI Three-wire UART (H5) protocol init failed");
439
440 return err;
441}
442
/* Unregister the H5 protocol from the hci_uart core on module exit. */
int __exit h5_deinit(void)
{
	return hci_uart_unregister_proto(&h5p);
}