/*
 * Copyright (C) ST-Ericsson AB 2010
 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
 * Author:  Daniel Martensson / daniel.martensson@stericsson.com
 *	    Dmitry.Tarnyagin / dmitry.tarnyagin@stericsson.com
 * License terms: GNU General Public License (GPL) version 2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/if_arp.h>
#include <linux/timer.h>
#include <net/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_hsi.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
MODULE_DESCRIPTION("CAIF HSI driver");

/* Returns the number of padding bytes for alignment. */
#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
				(((pow)-((x)&((pow)-1)))))
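
/*
 * Example: PAD_POW2(5, 4) == 3 and PAD_POW2(8, 4) == 0; the macro yields
 * the number of bytes needed to pad x up to the next multiple of pow
 * (which must be a power of two).
 */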

static int inactivity_timeout = 1000;
module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms.");

static int aggregation_timeout = 1;
module_param(aggregation_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(aggregation_timeout, "Aggregation timeout on HSI, ms.");
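
/*
 * Illustrative module load with both timeouts overridden (hypothetical
 * values; assumes the module is built as caif_hsi):
 *
 *   modprobe caif_hsi inactivity_timeout=500 aggregation_timeout=2
 */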

/*
 * HSI padding options.
 * Warning: must be a power of 2 (the & operation is used) and cannot be zero!
 */
static int hsi_head_align = 4;
module_param(hsi_head_align, int, S_IRUGO);
MODULE_PARM_DESC(hsi_head_align, "HSI head alignment.");

static int hsi_tail_align = 4;
module_param(hsi_tail_align, int, S_IRUGO);
MODULE_PARM_DESC(hsi_tail_align, "HSI tail alignment.");

/*
 * HSI link layer flow-control thresholds.
 * Warning: A high threshold value might increase throughput but it will at
 * the same time prevent channel prioritization and increase the risk of
 * flooding the modem. The high threshold should be above the low.
 */
static int hsi_high_threshold = 100;
module_param(hsi_high_threshold, int, S_IRUGO);
MODULE_PARM_DESC(hsi_high_threshold, "HSI high threshold (FLOW OFF).");

static int hsi_low_threshold = 50;
module_param(hsi_low_threshold, int, S_IRUGO);
MODULE_PARM_DESC(hsi_low_threshold, "HSI low threshold (FLOW ON).");

#define ON 1
#define OFF 0

/*
 * Threshold values for the HSI packet queue. Flow control will be asserted
 * when the number of packets exceeds HIGH_WATER_MARK. It will not be
 * de-asserted before the number of packets drops below LOW_WATER_MARK.
 */
#define LOW_WATER_MARK hsi_low_threshold
#define HIGH_WATER_MARK hsi_high_threshold
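
/*
 * Worked example with the defaults above: flow off is signalled to the
 * CAIF stack once more than 100 packets are queued for TX (see
 * cfhsi_xmit()), and flow on is signalled again when the queues have
 * drained to 50 packets or fewer (see cfhsi_tx_done()).
 */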

static LIST_HEAD(cfhsi_list);

static void cfhsi_inactivity_tout(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	/* Schedule power down work queue. */
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_down_work);
}

static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
					   const struct sk_buff *skb,
					   int direction)
{
	struct caif_payload_info *info;
	int hpad, tpad, len;

	info = (struct caif_payload_info *)&skb->cb;
	hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
	tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
	len = skb->len + hpad + tpad;

	if (direction > 0)
		cfhsi->aggregation_len += len;
	else if (direction < 0)
		cfhsi->aggregation_len -= len;
}

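/*
 * Decide whether an aggregated frame should be sent immediately: always
 * when aggregation is disabled (timeout == 0), when any queue above
 * best-effort/background holds a packet, or when the best-effort queue
 * itself has reached CFHSI_MAX_PKTS.
 */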
static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
{
	int i;

	if (cfhsi->aggregation_timeout == 0)
		return true;

	for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
		if (cfhsi->qhead[i].qlen)
			return true;
	}

	/* TODO: Use aggregation_len instead */
	if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS)
		return true;

	return false;
}

static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < CFHSI_PRIO_LAST; ++i) {
		skb = skb_dequeue(&cfhsi->qhead[i]);
		if (skb)
			break;
	}

	return skb;
}

static int cfhsi_tx_queue_len(struct cfhsi *cfhsi)
{
	int i, len = 0;
	for (i = 0; i < CFHSI_PRIO_LAST; ++i)
		len += skb_queue_len(&cfhsi->qhead[i]);
	return len;
}

static void cfhsi_abort_tx(struct cfhsi *cfhsi)
{
	struct sk_buff *skb;

	for (;;) {
		spin_lock_bh(&cfhsi->lock);
		skb = cfhsi_dequeue(cfhsi);
		if (!skb)
			break;

		cfhsi->ndev->stats.tx_errors++;
		cfhsi->ndev->stats.tx_dropped++;
		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
		spin_unlock_bh(&cfhsi->lock);
		kfree_skb(skb);
	}
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		mod_timer(&cfhsi->inactivity_timer,
			jiffies + cfhsi->inactivity_timeout);
	spin_unlock_bh(&cfhsi->lock);
}

static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
{
	char buffer[32]; /* Any reasonable value */
	size_t fifo_occupancy;
	int ret;

	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	do {
		ret = cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
				&fifo_occupancy);
		if (ret) {
			netdev_warn(cfhsi->ndev,
				"%s: can't get FIFO occupancy: %d.\n",
				__func__, ret);
			break;
		} else if (!fifo_occupancy)
			/* No more data, exiting normally */
			break;

		fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
		set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
		ret = cfhsi->ops->cfhsi_rx(buffer, fifo_occupancy,
				cfhsi->ops);
		if (ret) {
			clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
			netdev_warn(cfhsi->ndev,
				"%s: can't read data: %d.\n",
				__func__, ret);
			break;
		}

		ret = 5 * HZ;
		ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
			 !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);

		if (ret < 0) {
			netdev_warn(cfhsi->ndev,
				"%s: can't wait for flush complete: %d.\n",
				__func__, ret);
			break;
		} else if (!ret) {
			ret = -ETIMEDOUT;
			netdev_warn(cfhsi->ndev,
				"%s: timeout waiting for flush complete.\n",
				__func__);
			break;
		}
	} while (1);

	return ret;
}

static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int nfrms = 0;
	int pld_len = 0;
	struct sk_buff *skb;
	u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;

	skb = cfhsi_dequeue(cfhsi);
	if (!skb)
		return 0;

	/* Clear offset. */
	desc->offset = 0;

	/* Check if we can embed a CAIF frame. */
	if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
		struct caif_payload_info *info;
		int hpad;
		int tpad;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);

		/* Check if frame still fits with added alignment. */
		if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
			u8 *pemb = desc->emb_frm;
			desc->offset = CFHSI_DESC_SHORT_SZ;
			*pemb = (u8)(hpad - 1);
			pemb += hpad;

			/* Update network statistics. */
			spin_lock_bh(&cfhsi->lock);
			cfhsi->ndev->stats.tx_packets++;
			cfhsi->ndev->stats.tx_bytes += skb->len;
			cfhsi_update_aggregation_stats(cfhsi, skb, -1);
			spin_unlock_bh(&cfhsi->lock);

			/* Copy in embedded CAIF frame. */
			skb_copy_bits(skb, 0, pemb, skb->len);

			/* Consume the SKB */
			consume_skb(skb);
			skb = NULL;
		}
	}

	/* Create payload CAIF frames. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	while (nfrms < CFHSI_MAX_PKTS) {
		struct caif_payload_info *info;
		int hpad;
		int tpad;

		if (!skb)
			skb = cfhsi_dequeue(cfhsi);

		if (!skb)
			break;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);

		/* Fill in CAIF frame length in descriptor. */
		desc->cffrm_len[nfrms] = hpad + skb->len + tpad;

		/* Fill head padding information. */
		*pfrm = (u8)(hpad - 1);
		pfrm += hpad;

		/* Update network statistics. */
		spin_lock_bh(&cfhsi->lock);
		cfhsi->ndev->stats.tx_packets++;
		cfhsi->ndev->stats.tx_bytes += skb->len;
		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
		spin_unlock_bh(&cfhsi->lock);

		/* Copy in CAIF frame. */
		skb_copy_bits(skb, 0, pfrm, skb->len);

		/* Update payload length. */
		pld_len += desc->cffrm_len[nfrms];

		/* Update frame pointer. */
		pfrm += skb->len + tpad;

		/* Consume the SKB */
		consume_skb(skb);
		skb = NULL;

		/* Update number of frames. */
		nfrms++;
	}

	/* Unused length fields should be zero-filled (according to SPEC). */
	while (nfrms < CFHSI_MAX_PKTS) {
		desc->cffrm_len[nfrms] = 0x0000;
		nfrms++;
	}

	/* Check if we can piggy-back another descriptor. */
	if (cfhsi_can_send_aggregate(cfhsi))
		desc->header |= CFHSI_PIGGY_DESC;
	else
		desc->header &= ~CFHSI_PIGGY_DESC;

	return CFHSI_DESC_SZ + pld_len;
}

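/*
 * Sketch of the TX buffer built by cfhsi_tx_frm() above (derived from the
 * code, not a normative wire-format description):
 *
 *   header     flags such as CFHSI_PIGGY_DESC
 *   offset     location of an embedded CAIF frame, 0 if none
 *   cffrm_len  per-frame lengths: head pad + CAIF frame + tail pad
 *   emb_frm    optional embedded frame, then the payload frames, each
 *              preceded by one padding byte holding (hpad - 1)
 */
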
static void cfhsi_start_tx(struct cfhsi *cfhsi)
{
	struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
	int len, res;

	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	do {
		/* Create HSI frame. */
		len = cfhsi_tx_frm(desc, cfhsi);
		if (!len) {
			spin_lock_bh(&cfhsi->lock);
			if (unlikely(cfhsi_tx_queue_len(cfhsi))) {
				spin_unlock_bh(&cfhsi->lock);
				res = -EAGAIN;
				continue;
			}
			cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
			/* Start inactivity timer. */
			mod_timer(&cfhsi->inactivity_timer,
				jiffies + cfhsi->inactivity_timeout);
			spin_unlock_bh(&cfhsi->lock);
			break;
		}

		/* Set up new transfer. */
		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
		if (WARN_ON(res < 0))
			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
				__func__, res);
	} while (res < 0);
}

static void cfhsi_tx_done(struct cfhsi *cfhsi)
{
	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/*
	 * Send flow on if flow off has been previously signalled
	 * and number of packets is below low water mark.
	 */
	spin_lock_bh(&cfhsi->lock);
	if (cfhsi->flow_off_sent &&
			cfhsi_tx_queue_len(cfhsi) <= cfhsi->q_low_mark &&
			cfhsi->cfdev.flowctrl) {

		cfhsi->flow_off_sent = 0;
		cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
	}

	if (cfhsi_can_send_aggregate(cfhsi)) {
		spin_unlock_bh(&cfhsi->lock);
		cfhsi_start_tx(cfhsi);
	} else {
		mod_timer(&cfhsi->aggregation_timer,
			jiffies + cfhsi->aggregation_timeout);
		spin_unlock_bh(&cfhsi->lock);
	}

	return;
}

static void cfhsi_tx_done_cb(struct cfhsi_cb_ops *cb_ops)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;
	cfhsi_tx_done(cfhsi);
}

static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int xfer_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
		netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
			__func__);
		return -EPROTO;
	}

	/* Check for embedded CAIF frame. */
	if (desc->offset) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		int len = 0;
		pfrm = ((u8 *)desc) + desc->offset;

		/* Remove offset padding. */
		pfrm += *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pfrm;
		len |= ((*(pfrm+1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */
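		/* Example: length bytes 0x05 0x00 give len = 5 + 2 = 7. */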

		/* Sanity check length of CAIF frame. */
		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
			netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
				__func__);
			return -EPROTO;
		}

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
				__func__);
			return -ENOMEM;
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pfrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We are in a callback handler and
		 * unfortunately we don't know what context we're
		 * running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;
	}

	/* Calculate transfer length. */
	plen = desc->cffrm_len;
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		xfer_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Check for piggy-backed descriptor. */
	if (desc->header & CFHSI_PIGGY_DESC)
		xfer_sz += CFHSI_DESC_SZ;

	if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
		netdev_err(cfhsi->ndev,
				"%s: Invalid payload len: %d, ignored.\n",
				__func__, xfer_sz);
		return -EPROTO;
	}
	return xfer_sz;
}

static int cfhsi_rx_desc_len(struct cfhsi_desc *desc)
{
	int xfer_sz = 0;
	int nfrms = 0;
	u16 *plen;

	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {

		pr_err("Invalid descriptor. %x %x\n", desc->header,
				desc->offset);
		return -EPROTO;
	}

	/* Calculate transfer length. */
	plen = desc->cffrm_len;
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		xfer_sz += *plen;
		plen++;
		nfrms++;
	}

	if (xfer_sz % 4) {
		pr_err("Invalid payload len: %d, ignored.\n", xfer_sz);
		return -EPROTO;
	}
	return xfer_sz;
}

static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int rx_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	/* Sanity check header and offset. */
	if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
		netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
			__func__);
		return -EPROTO;
	}

	/* Set frame pointer to start of payload. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	plen = desc->cffrm_len;

	/* Skip already processed frames. */
	while (nfrms < cfhsi->rx_state.nfrms) {
		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Parse payload. */
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		u8 *pcffrm = NULL;
		int len;

		/* CAIF frame starts after head padding. */
		pcffrm = pfrm + *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pcffrm;
		len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */

		/* Sanity check length of CAIF frames. */
		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
			netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
				__func__);
			return -EPROTO;
		}

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
				__func__);
			cfhsi->rx_state.nfrms = nfrms;
			return -ENOMEM;
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pcffrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We're called in callback from HSI
		 * and don't know the context we're running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;

		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	return rx_sz;
}

static void cfhsi_rx_done(struct cfhsi *cfhsi)
{
	int res;
	int desc_pld_len = 0, rx_len, rx_state;
	struct cfhsi_desc *desc = NULL;
	u8 *rx_ptr, *rx_buf;
	struct cfhsi_desc *piggy_desc = NULL;

	desc = (struct cfhsi_desc *)cfhsi->rx_buf;

	netdev_dbg(cfhsi->ndev, "%s\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Update inactivity timer if pending. */
	spin_lock_bh(&cfhsi->lock);
	mod_timer_pending(&cfhsi->inactivity_timer,
			jiffies + cfhsi->inactivity_timeout);
	spin_unlock_bh(&cfhsi->lock);

	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
		desc_pld_len = cfhsi_rx_desc_len(desc);

		if (desc_pld_len < 0)
			goto out_of_sync;

		rx_buf = cfhsi->rx_buf;
		rx_len = desc_pld_len;
		if (desc_pld_len > 0 && (desc->header & CFHSI_PIGGY_DESC))
			rx_len += CFHSI_DESC_SZ;
		if (desc_pld_len == 0)
			rx_buf = cfhsi->rx_flip_buf;
	} else {
		rx_buf = cfhsi->rx_flip_buf;

		rx_len = CFHSI_DESC_SZ;
		if (cfhsi->rx_state.pld_len > 0 &&
				(desc->header & CFHSI_PIGGY_DESC)) {

			piggy_desc = (struct cfhsi_desc *)
				(desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
						cfhsi->rx_state.pld_len);

			cfhsi->rx_state.piggy_desc = true;

			/* Extract payload len from piggy-backed descriptor. */
			desc_pld_len = cfhsi_rx_desc_len(piggy_desc);
			if (desc_pld_len < 0)
				goto out_of_sync;

			if (desc_pld_len > 0) {
				rx_len = desc_pld_len;
				if (piggy_desc->header & CFHSI_PIGGY_DESC)
					rx_len += CFHSI_DESC_SZ;
			}

			/*
			 * Copy needed information from the piggy-backed
			 * descriptor to the descriptor in the start.
			 */
			memcpy(rx_buf, (u8 *)piggy_desc,
					CFHSI_DESC_SHORT_SZ);
			/* Mark no embedded frame here */
			piggy_desc->offset = 0;
		}
	}

	if (desc_pld_len) {
		rx_state = CFHSI_RX_STATE_PAYLOAD;
		rx_ptr = rx_buf + CFHSI_DESC_SZ;
	} else {
		rx_state = CFHSI_RX_STATE_DESC;
		rx_ptr = rx_buf;
		rx_len = CFHSI_DESC_SZ;
	}

	/* Initiate next read */
	if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
		/* Set up new transfer. */
		netdev_dbg(cfhsi->ndev, "%s: Start RX.\n",
				__func__);

		res = cfhsi->ops->cfhsi_rx(rx_ptr, rx_len,
				cfhsi->ops);
		if (WARN_ON(res < 0)) {
			netdev_err(cfhsi->ndev, "%s: RX error %d.\n",
				__func__, res);
			cfhsi->ndev->stats.rx_errors++;
			cfhsi->ndev->stats.rx_dropped++;
		}
	}

	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
		/* Extract payload from descriptor */
		if (cfhsi_rx_desc(desc, cfhsi) < 0)
			goto out_of_sync;
	} else {
		/* Extract payload */
		if (cfhsi_rx_pld(desc, cfhsi) < 0)
			goto out_of_sync;
		if (piggy_desc) {
			/* Extract any payload in piggyback descriptor. */
			if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
				goto out_of_sync;
		}
	}

	/* Update state info */
	memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
	cfhsi->rx_state.state = rx_state;
	cfhsi->rx_ptr = rx_ptr;
	cfhsi->rx_len = rx_len;
	cfhsi->rx_state.pld_len = desc_pld_len;
	cfhsi->rx_state.piggy_desc = desc->header & CFHSI_PIGGY_DESC;

	if (rx_buf != cfhsi->rx_buf)
		swap(cfhsi->rx_buf, cfhsi->rx_flip_buf);
	return;

out_of_sync:
	netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__);
	print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
			cfhsi->rx_buf, CFHSI_DESC_SZ);
	schedule_work(&cfhsi->out_of_sync_work);
}

static void cfhsi_rx_slowpath(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	cfhsi_rx_done(cfhsi);
}

static void cfhsi_rx_done_cb(struct cfhsi_cb_ops *cb_ops)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
		wake_up_interruptible(&cfhsi->flush_fifo_wait);
	else
		cfhsi_rx_done(cfhsi);
}

static void cfhsi_wake_up(struct work_struct *work)
{
	struct cfhsi *cfhsi = NULL;
	int res;
	int len;
	long ret;

	cfhsi = container_of(work, struct cfhsi, wake_up_work);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
		/* It happens when wakeup is requested by
		 * both ends at the same time. */
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
		return;
	}

	/* Activate wake line. */
	cfhsi->ops->cfhsi_wake_up(cfhsi->ops);

	netdev_dbg(cfhsi->ndev, "%s: Start waiting.\n",
		__func__);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKE_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
					test_and_clear_bit(CFHSI_WAKE_UP_ACK,
							&cfhsi->bits), ret);
	if (unlikely(ret < 0)) {
		/* Interrupted by signal. */
		netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
			__func__, ret);

		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
		return;
	} else if (!ret) {
		bool ca_wake = false;
		size_t fifo_occupancy = 0;

		/* Wakeup timeout */
		netdev_dbg(cfhsi->ndev, "%s: Timeout.\n",
			__func__);

		/* Check FIFO to check if modem has sent something. */
		WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
					&fifo_occupancy));

		netdev_dbg(cfhsi->ndev, "%s: Bytes in FIFO: %u.\n",
			__func__, (unsigned) fifo_occupancy);

		/* Check if we missed the interrupt. */
		WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
							&ca_wake));

		if (ca_wake) {
			netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
				__func__);

			/* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
			clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

			/* Continue execution. */
			goto wake_ack;
		}

		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
		return;
	}
wake_ack:
	netdev_dbg(cfhsi->ndev, "%s: Woken.\n",
		__func__);

	/* Clear power up bit. */
	set_bit(CFHSI_AWAKE, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);

	/* Resume read operation. */
	netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__);
	res = cfhsi->ops->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->ops);

	if (WARN_ON(res < 0))
		netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res);

	/* Clear power up acknowledgment. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

	spin_lock_bh(&cfhsi->lock);

	/* Resume transmit if queues are not empty. */
	if (!cfhsi_tx_queue_len(cfhsi)) {
		netdev_dbg(cfhsi->ndev, "%s: Peer wake, start timer.\n",
			__func__);
		/* Start inactivity timer. */
		mod_timer(&cfhsi->inactivity_timer,
				jiffies + cfhsi->inactivity_timeout);
		spin_unlock_bh(&cfhsi->lock);
		return;
	}

	netdev_dbg(cfhsi->ndev, "%s: Host wake.\n",
		__func__);

	spin_unlock_bh(&cfhsi->lock);

	/* Create HSI frame. */
	len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);

	if (likely(len > 0)) {
		/* Set up new transfer. */
		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
		if (WARN_ON(res < 0)) {
			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		netdev_err(cfhsi->ndev,
			"%s: Failed to create HSI frame: %d.\n",
			__func__, len);
	}
}

static void cfhsi_wake_down(struct work_struct *work)
{
	long ret;
	struct cfhsi *cfhsi = NULL;
	size_t fifo_occupancy = 0;
	int retry = CFHSI_WAKE_TOUT;

	cfhsi = container_of(work, struct cfhsi, wake_down_work);
	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Deactivate wake line. */
	cfhsi->ops->cfhsi_wake_down(cfhsi->ops);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKE_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
					test_and_clear_bit(CFHSI_WAKE_DOWN_ACK,
							&cfhsi->bits), ret);
	if (ret < 0) {
		/* Interrupted by signal. */
		netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
			__func__, ret);
		return;
	} else if (!ret) {
		bool ca_wake = true;

		/* Timeout */
		netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__);

		/* Check if we missed the interrupt. */
		WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
							&ca_wake));
		if (!ca_wake)
			netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
				__func__);
	}

	/* Check FIFO occupancy. */
	while (retry) {
		WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
							&fifo_occupancy));

		if (!fifo_occupancy)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);
		retry--;
	}

	if (!retry)
		netdev_err(cfhsi->ndev, "%s: FIFO Timeout.\n", __func__);

	/* Clear AWAKE condition. */
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);

	/* Cancel pending RX requests. */
	cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
}

static void cfhsi_out_of_sync(struct work_struct *work)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(work, struct cfhsi, out_of_sync_work);

	rtnl_lock();
	dev_close(cfhsi->ndev);
	rtnl_unlock();
}

static void cfhsi_wake_up_cb(struct cfhsi_cb_ops *cb_ops)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_up_wait);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Schedule wake up work queue if the peer initiates. */
	if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_up_work);
}

static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	/* Initiating low power is only permitted by the host (us). */
	set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_down_wait);
}

static void cfhsi_aggregation_tout(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	cfhsi_start_tx(cfhsi);
}

static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct cfhsi *cfhsi = NULL;
	int start_xfer = 0;
	int timer_active;
	int prio;

	if (!dev)
		return -EINVAL;

	cfhsi = netdev_priv(dev);

	switch (skb->priority) {
	case TC_PRIO_BESTEFFORT:
	case TC_PRIO_FILLER:
	case TC_PRIO_BULK:
		prio = CFHSI_PRIO_BEBK;
		break;
	case TC_PRIO_INTERACTIVE_BULK:
		prio = CFHSI_PRIO_VI;
		break;
	case TC_PRIO_INTERACTIVE:
		prio = CFHSI_PRIO_VO;
		break;
	case TC_PRIO_CONTROL:
	default:
		prio = CFHSI_PRIO_CTL;
		break;
	}

	spin_lock_bh(&cfhsi->lock);

	/* Update aggregation statistics */
	cfhsi_update_aggregation_stats(cfhsi, skb, 1);

	/* Queue the SKB */
	skb_queue_tail(&cfhsi->qhead[prio], skb);

	/* Sanity check; xmit should not be called after unregister_netdev */
	if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
		spin_unlock_bh(&cfhsi->lock);
		cfhsi_abort_tx(cfhsi);
		return -EINVAL;
	}

	/* Send flow off if number of packets is above high water mark. */
	if (!cfhsi->flow_off_sent &&
		cfhsi_tx_queue_len(cfhsi) > cfhsi->q_high_mark &&
		cfhsi->cfdev.flowctrl) {
		cfhsi->flow_off_sent = 1;
		cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
	}

	if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
		cfhsi->tx_state = CFHSI_TX_STATE_XFER;
		start_xfer = 1;
	}

	if (!start_xfer) {
		/* Send aggregate if it is possible */
		bool aggregate_ready =
			cfhsi_can_send_aggregate(cfhsi) &&
			del_timer(&cfhsi->aggregation_timer) > 0;
		spin_unlock_bh(&cfhsi->lock);
		if (aggregate_ready)
			cfhsi_start_tx(cfhsi);
		return 0;
	}

	/* Delete inactivity timer if started. */
	timer_active = del_timer_sync(&cfhsi->inactivity_timer);

	spin_unlock_bh(&cfhsi->lock);

	if (timer_active) {
		struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
		int len;
		int res;

		/* Create HSI frame. */
		len = cfhsi_tx_frm(desc, cfhsi);
		WARN_ON(!len);

		/* Set up new transfer. */
		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
		if (WARN_ON(res < 0)) {
			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		/* Schedule wake up work queue if we initiate. */
		if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
			queue_work(cfhsi->wq, &cfhsi->wake_up_work);
	}

	return 0;
}

static const struct net_device_ops cfhsi_netdevops;

static void cfhsi_setup(struct net_device *dev)
{
	int i;
	struct cfhsi *cfhsi = netdev_priv(dev);
	dev->features = 0;
	dev->type = ARPHRD_CAIF;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
	dev->tx_queue_len = 0;
	dev->destructor = free_netdev;
	dev->netdev_ops = &cfhsi_netdevops;
	for (i = 0; i < CFHSI_PRIO_LAST; ++i)
		skb_queue_head_init(&cfhsi->qhead[i]);
	cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
	cfhsi->cfdev.use_frag = false;
	cfhsi->cfdev.use_stx = false;
	cfhsi->cfdev.use_fcs = false;
	cfhsi->ndev = dev;
}

static int cfhsi_open(struct net_device *ndev)
{
	struct cfhsi *cfhsi = netdev_priv(ndev);
	int res;

	clear_bit(CFHSI_SHUTDOWN, &cfhsi->bits);

	/* Initialize state variables. */
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;

	/* Set flow info */
	cfhsi->flow_off_sent = 0;
	cfhsi->q_low_mark = LOW_WATER_MARK;
	cfhsi->q_high_mark = HIGH_WATER_MARK;

	/*
	 * Allocate a TX buffer with the size of a HSI packet descriptor
	 * and the necessary room for CAIF payload frames.
	 */
	cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
	if (!cfhsi->tx_buf) {
		res = -ENODEV;
		goto err_alloc_tx;
	}

	/*
	 * Allocate a RX buffer with the size of two HSI packet descriptors and
	 * the necessary room for CAIF payload frames.
	 */
	cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
	if (!cfhsi->rx_buf) {
		res = -ENODEV;
		goto err_alloc_rx;
	}

	cfhsi->rx_flip_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
	if (!cfhsi->rx_flip_buf) {
		res = -ENODEV;
		goto err_alloc_rx_flip;
	}

	/* Pre-calculate inactivity timeout. */
	if (inactivity_timeout != -1) {
		cfhsi->inactivity_timeout =
				inactivity_timeout * HZ / 1000;
		if (!cfhsi->inactivity_timeout)
			cfhsi->inactivity_timeout = 1;
		else if (cfhsi->inactivity_timeout > NEXT_TIMER_MAX_DELTA)
			cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
	} else {
		cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
	}

	/* Initialize aggregation timeout */
	cfhsi->aggregation_timeout = aggregation_timeout;

	/* Initialize receive variables. */
	cfhsi->rx_ptr = cfhsi->rx_buf;
	cfhsi->rx_len = CFHSI_DESC_SZ;

	/* Initialize spin locks. */
	spin_lock_init(&cfhsi->lock);

	/* Set up the driver. */
	cfhsi->cb_ops.tx_done_cb = cfhsi_tx_done_cb;
	cfhsi->cb_ops.rx_done_cb = cfhsi_rx_done_cb;
	cfhsi->cb_ops.wake_up_cb = cfhsi_wake_up_cb;
	cfhsi->cb_ops.wake_down_cb = cfhsi_wake_down_cb;

	/* Initialize the work queues. */
	INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
	INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
	INIT_WORK(&cfhsi->out_of_sync_work, cfhsi_out_of_sync);

	/* Clear all bit fields. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);

	/* Create work thread. */
	cfhsi->wq = create_singlethread_workqueue(cfhsi->ndev->name);
	if (!cfhsi->wq) {
		netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n",
			__func__);
		res = -ENODEV;
		goto err_create_wq;
	}

	/* Initialize wait queues. */
	init_waitqueue_head(&cfhsi->wake_up_wait);
	init_waitqueue_head(&cfhsi->wake_down_wait);
	init_waitqueue_head(&cfhsi->flush_fifo_wait);

	/* Setup the inactivity timer. */
	init_timer(&cfhsi->inactivity_timer);
	cfhsi->inactivity_timer.data = (unsigned long)cfhsi;
	cfhsi->inactivity_timer.function = cfhsi_inactivity_tout;
	/* Setup the slowpath RX timer. */
	init_timer(&cfhsi->rx_slowpath_timer);
	cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi;
	cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath;
	/* Setup the aggregation timer. */
	init_timer(&cfhsi->aggregation_timer);
	cfhsi->aggregation_timer.data = (unsigned long)cfhsi;
	cfhsi->aggregation_timer.function = cfhsi_aggregation_tout;

	/* Activate HSI interface. */
	res = cfhsi->ops->cfhsi_up(cfhsi->ops);
	if (res) {
		netdev_err(cfhsi->ndev,
			"%s: can't activate HSI interface: %d.\n",
			__func__, res);
		goto err_activate;
	}

	/* Flush FIFO */
	res = cfhsi_flush_fifo(cfhsi);
	if (res) {
		netdev_err(cfhsi->ndev, "%s: Can't flush FIFO: %d.\n",
			__func__, res);
		goto err_net_reg;
	}
	return res;

 err_net_reg:
	cfhsi->ops->cfhsi_down(cfhsi->ops);
 err_activate:
	destroy_workqueue(cfhsi->wq);
 err_create_wq:
	kfree(cfhsi->rx_flip_buf);
 err_alloc_rx_flip:
	kfree(cfhsi->rx_buf);
 err_alloc_rx:
	kfree(cfhsi->tx_buf);
 err_alloc_tx:
	return res;
}

static int cfhsi_close(struct net_device *ndev)
{
	struct cfhsi *cfhsi = netdev_priv(ndev);
	u8 *tx_buf, *rx_buf, *flip_buf;

	/* going to shutdown driver */
	set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);

	/* Flush workqueue */
	flush_workqueue(cfhsi->wq);

	/* Delete timers if pending */
	del_timer_sync(&cfhsi->inactivity_timer);
	del_timer_sync(&cfhsi->rx_slowpath_timer);
	del_timer_sync(&cfhsi->aggregation_timer);

	/* Cancel pending RX request (if any) */
	cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);

	/* Destroy workqueue */
	destroy_workqueue(cfhsi->wq);

	/* Store buffers: will be freed later. */
	tx_buf = cfhsi->tx_buf;
	rx_buf = cfhsi->rx_buf;
	flip_buf = cfhsi->rx_flip_buf;
	/* Flush transmit queues. */
	cfhsi_abort_tx(cfhsi);

	/* Deactivate interface */
	cfhsi->ops->cfhsi_down(cfhsi->ops);

	/* Free buffers. */
	kfree(tx_buf);
	kfree(rx_buf);
	kfree(flip_buf);
	return 0;
}

static void cfhsi_uninit(struct net_device *dev)
{
	struct cfhsi *cfhsi = netdev_priv(dev);
	ASSERT_RTNL();
	/* Release the reference taken with symbol_get(cfhsi_get_ops) in
	 * caif_hsi_newlink(); the original put of cfhsi_get_device was a
	 * mismatched leftover. */
	symbol_put(cfhsi_get_ops);
	list_del(&cfhsi->list);
}

static const struct net_device_ops cfhsi_netdevops = {
	.ndo_uninit = cfhsi_uninit,
	.ndo_open = cfhsi_open,
	.ndo_stop = cfhsi_close,
	.ndo_start_xmit = cfhsi_xmit
};

static void cfhsi_netlink_parms(struct nlattr *data[], struct cfhsi *cfhsi)
{
	int i;

	if (!data) {
		pr_debug("no params data found\n");
		return;
	}

	i = __IFLA_CAIF_HSI_INACTIVITY_TOUT;
	if (data[i])
		inactivity_timeout = nla_get_u32(data[i]);

	i = __IFLA_CAIF_HSI_AGGREGATION_TOUT;
	if (data[i])
		aggregation_timeout = nla_get_u32(data[i]);

	i = __IFLA_CAIF_HSI_HEAD_ALIGN;
	if (data[i])
		hsi_head_align = nla_get_u32(data[i]);

	i = __IFLA_CAIF_HSI_TAIL_ALIGN;
	if (data[i])
		hsi_tail_align = nla_get_u32(data[i]);

	i = __IFLA_CAIF_HSI_QHIGH_WATERMARK;
	if (data[i])
		hsi_high_threshold = nla_get_u32(data[i]);
}

static int caif_hsi_changelink(struct net_device *dev, struct nlattr *tb[],
				struct nlattr *data[])
{
	cfhsi_netlink_parms(data, netdev_priv(dev));
	netdev_state_change(dev);
	return 0;
}

static const struct nla_policy caif_hsi_policy[__IFLA_CAIF_HSI_MAX + 1] = {
	[__IFLA_CAIF_HSI_INACTIVITY_TOUT] = { .type = NLA_U32, .len = 4 },
	[__IFLA_CAIF_HSI_AGGREGATION_TOUT] = { .type = NLA_U32, .len = 4 },
	[__IFLA_CAIF_HSI_HEAD_ALIGN] = { .type = NLA_U32, .len = 4 },
	[__IFLA_CAIF_HSI_TAIL_ALIGN] = { .type = NLA_U32, .len = 4 },
	[__IFLA_CAIF_HSI_QHIGH_WATERMARK] = { .type = NLA_U32, .len = 4 },
	[__IFLA_CAIF_HSI_QLOW_WATERMARK] = { .type = NLA_U32, .len = 4 },
};

static size_t caif_hsi_get_size(const struct net_device *dev)
{
	int i;
	size_t s = 0;
	for (i = __IFLA_CAIF_HSI_UNSPEC + 1; i < __IFLA_CAIF_HSI_MAX; i++)
		s += nla_total_size(caif_hsi_policy[i].len);
	return s;
}

static int caif_hsi_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	if (nla_put_u32(skb, __IFLA_CAIF_HSI_INACTIVITY_TOUT,
			inactivity_timeout) ||
	    nla_put_u32(skb, __IFLA_CAIF_HSI_AGGREGATION_TOUT,
			aggregation_timeout) ||
	    nla_put_u32(skb, __IFLA_CAIF_HSI_HEAD_ALIGN, hsi_head_align) ||
	    nla_put_u32(skb, __IFLA_CAIF_HSI_TAIL_ALIGN, hsi_tail_align) ||
	    nla_put_u32(skb, __IFLA_CAIF_HSI_QHIGH_WATERMARK,
			hsi_high_threshold) ||
	    nla_put_u32(skb, __IFLA_CAIF_HSI_QLOW_WATERMARK,
			hsi_low_threshold))
		return -EMSGSIZE;

	return 0;
}

static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[])
{
	struct cfhsi *cfhsi = NULL;
	struct cfhsi_ops *(*get_ops)(void);

	ASSERT_RTNL();

	cfhsi = netdev_priv(dev);
	cfhsi_netlink_parms(data, cfhsi);
	dev_net_set(cfhsi->ndev, src_net);

	get_ops = symbol_get(cfhsi_get_ops);
	if (!get_ops) {
		pr_err("%s: failed to get the cfhsi_ops\n", __func__);
		return -ENODEV;
	}

	/* Assign the HSI device. */
	cfhsi->ops = (*get_ops)();
	if (!cfhsi->ops) {
		pr_err("%s: failed to get the cfhsi_ops\n", __func__);
		goto err;
	}

	/* Assign the driver to this HSI device. */
	cfhsi->ops->cb_ops = &cfhsi->cb_ops;
	if (register_netdevice(dev)) {
		pr_warn("%s: caif_hsi device registration failed\n", __func__);
		goto err;
	}
	/* Add CAIF HSI device to list. */
	list_add_tail(&cfhsi->list, &cfhsi_list);

	return 0;
err:
	symbol_put(cfhsi_get_ops);
	return -ENODEV;
}

static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
	.kind		= "cfhsi",
	.priv_size	= sizeof(struct cfhsi),
	.setup		= cfhsi_setup,
	.maxtype	= __IFLA_CAIF_HSI_MAX,
	.policy		= caif_hsi_policy,
	.newlink	= caif_hsi_newlink,
	.changelink	= caif_hsi_changelink,
	.get_size	= caif_hsi_get_size,
	.fill_info	= caif_hsi_fill_info,
};
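
/*
 * Illustrative userspace usage, assuming an iproute2 build that knows the
 * "cfhsi" link kind registered above (device name is hypothetical):
 *
 *   ip link add dev cfhsi0 type cfhsi
 */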

static void __exit cfhsi_exit_module(void)
{
	struct list_head *list_node;
	struct list_head *n;
	struct cfhsi *cfhsi;

	rtnl_link_unregister(&caif_hsi_link_ops);

	rtnl_lock();
	list_for_each_safe(list_node, n, &cfhsi_list) {
		cfhsi = list_entry(list_node, struct cfhsi, list);
		/* RTNL is already held here, so use unregister_netdevice()
		 * instead of unregister_netdev(), which would deadlock by
		 * taking the lock again. */
		unregister_netdevice(cfhsi->ndev);
	}
	rtnl_unlock();
}

static int __init cfhsi_init_module(void)
{
	return rtnl_link_register(&caif_hsi_link_ops);
}

module_init(cfhsi_init_module);
module_exit(cfhsi_exit_module);