blob: a14f85c0f0e830700b0cdad4010f9f1419b2db8d [file] [log] [blame]
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
4 * Author: Daniel Martensson / daniel.martensson@stericsson.com
5 * Dmitry.Tarnyagin / dmitry.tarnyagin@stericsson.com
6 * License terms: GNU General Public License (GPL) version 2.
7 */
8
sjur.brandeland@stericsson.com39abbae2012-04-12 08:27:27 +00009#define pr_fmt(fmt) KBUILD_MODNAME fmt
10
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +000011#include <linux/init.h>
12#include <linux/module.h>
13#include <linux/device.h>
14#include <linux/platform_device.h>
15#include <linux/netdevice.h>
16#include <linux/string.h>
17#include <linux/list.h>
18#include <linux/interrupt.h>
19#include <linux/delay.h>
20#include <linux/sched.h>
21#include <linux/if_arp.h>
22#include <linux/timer.h>
Sjur Brændelandc4125402012-06-25 07:49:41 +000023#include <net/rtnetlink.h>
Dmitry Tarnyaginece367d2012-04-12 08:27:25 +000024#include <linux/pkt_sched.h>
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +000025#include <net/caif/caif_layer.h>
26#include <net/caif/caif_hsi.h>
27
28MODULE_LICENSE("GPL");
29MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
30MODULE_DESCRIPTION("CAIF HSI driver");
31
32/* Returns the number of padding bytes for alignment. */
33#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
34 (((pow)-((x)&((pow)-1)))))
35
Dmitry Tarnyagin28bd2042011-10-13 11:29:27 +000036static int inactivity_timeout = 1000;
37module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR);
38MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms.");
39
Dmitry Tarnyaginece367d2012-04-12 08:27:25 +000040static int aggregation_timeout = 1;
41module_param(aggregation_timeout, int, S_IRUGO | S_IWUSR);
42MODULE_PARM_DESC(aggregation_timeout, "Aggregation timeout on HSI, ms.");
43
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +000044/*
45 * HSI padding options.
46 * Warning: must be a base of 2 (& operation used) and can not be zero !
47 */
48static int hsi_head_align = 4;
49module_param(hsi_head_align, int, S_IRUGO);
50MODULE_PARM_DESC(hsi_head_align, "HSI head alignment.");
51
52static int hsi_tail_align = 4;
53module_param(hsi_tail_align, int, S_IRUGO);
54MODULE_PARM_DESC(hsi_tail_align, "HSI tail alignment.");
55
56/*
57 * HSI link layer flowcontrol thresholds.
58 * Warning: A high threshold value migth increase throughput but it will at
59 * the same time prevent channel prioritization and increase the risk of
60 * flooding the modem. The high threshold should be above the low.
61 */
62static int hsi_high_threshold = 100;
63module_param(hsi_high_threshold, int, S_IRUGO);
64MODULE_PARM_DESC(hsi_high_threshold, "HSI high threshold (FLOW OFF).");
65
66static int hsi_low_threshold = 50;
67module_param(hsi_low_threshold, int, S_IRUGO);
68MODULE_PARM_DESC(hsi_low_threshold, "HSI high threshold (FLOW ON).");
69
70#define ON 1
71#define OFF 0
72
73/*
74 * Threshold values for the HSI packet queue. Flowcontrol will be asserted
75 * when the number of packets exceeds HIGH_WATER_MARK. It will not be
76 * de-asserted before the number of packets drops below LOW_WATER_MARK.
77 */
78#define LOW_WATER_MARK hsi_low_threshold
79#define HIGH_WATER_MARK hsi_high_threshold
80
81static LIST_HEAD(cfhsi_list);
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +000082
83static void cfhsi_inactivity_tout(unsigned long arg)
84{
85 struct cfhsi *cfhsi = (struct cfhsi *)arg;
86
Sjur Brændeland90de9bab2012-06-25 07:49:38 +000087 netdev_dbg(cfhsi->ndev, "%s.\n",
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +000088 __func__);
89
90 /* Schedule power down work queue. */
91 if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
92 queue_work(cfhsi->wq, &cfhsi->wake_down_work);
93}
94
Dmitry Tarnyaginece367d2012-04-12 08:27:25 +000095static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
96 const struct sk_buff *skb,
97 int direction)
98{
99 struct caif_payload_info *info;
100 int hpad, tpad, len;
101
102 info = (struct caif_payload_info *)&skb->cb;
103 hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
104 tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
105 len = skb->len + hpad + tpad;
106
107 if (direction > 0)
108 cfhsi->aggregation_len += len;
109 else if (direction < 0)
110 cfhsi->aggregation_len -= len;
111}
112
113static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
114{
115 int i;
116
Kim Lilliestierna XXa5c96b52012-06-25 07:49:37 +0000117 if (cfhsi->aggregation_timeout == 0)
Dmitry Tarnyaginece367d2012-04-12 08:27:25 +0000118 return true;
119
120 for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
121 if (cfhsi->qhead[i].qlen)
122 return true;
123 }
124
125 /* TODO: Use aggregation_len instead */
126 if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS)
127 return true;
128
129 return false;
130}
131
132static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi)
133{
134 struct sk_buff *skb;
135 int i;
136
137 for (i = 0; i < CFHSI_PRIO_LAST; ++i) {
138 skb = skb_dequeue(&cfhsi->qhead[i]);
139 if (skb)
140 break;
141 }
142
143 return skb;
144}
145
146static int cfhsi_tx_queue_len(struct cfhsi *cfhsi)
147{
148 int i, len = 0;
149 for (i = 0; i < CFHSI_PRIO_LAST; ++i)
150 len += skb_queue_len(&cfhsi->qhead[i]);
151 return len;
152}
153
/*
 * Drop every queued TX packet (accounted as tx_errors/tx_dropped),
 * reset the TX state machine to idle, and re-arm the inactivity timer
 * unless the interface is shutting down.
 *
 * Locking: the lock is re-taken each iteration so kfree_skb() runs
 * unlocked; when cfhsi_dequeue() returns NULL the break deliberately
 * exits WITH the lock held, which the tail of the function relies on
 * before the final spin_unlock_bh().
 */
static void cfhsi_abort_tx(struct cfhsi *cfhsi)
{
	struct sk_buff *skb;

	for (;;) {
		spin_lock_bh(&cfhsi->lock);
		skb = cfhsi_dequeue(cfhsi);
		if (!skb)
			break; /* Queues empty - exit holding the lock. */

		/* Account the aborted packet before freeing it. */
		cfhsi->ndev->stats.tx_errors++;
		cfhsi->ndev->stats.tx_dropped++;
		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
		spin_unlock_bh(&cfhsi->lock);
		kfree_skb(skb);
	}
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		mod_timer(&cfhsi->inactivity_timer,
			jiffies + cfhsi->inactivity_timeout);
	spin_unlock_bh(&cfhsi->lock);
}
176
/*
 * Drain stale data from the HSI FIFO.  Repeatedly reads up to
 * sizeof(buffer) bytes; each read's completion is signalled by
 * cfhsi_rx_done_cb() clearing CFHSI_FLUSH_FIFO and waking
 * flush_fifo_wait.  Returns <= 0 on failure paths (negative errno, or
 * -ETIMEDOUT if a read never completes); exits with ret from the last
 * operation once the FIFO reports empty.
 */
static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
{
	char buffer[32]; /* Any reasonable value */
	size_t fifo_occupancy;
	int ret;

	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	do {
		ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
				&fifo_occupancy);
		if (ret) {
			netdev_warn(cfhsi->ndev,
				"%s: can't get FIFO occupancy: %d.\n",
				__func__, ret);
			break;
		} else if (!fifo_occupancy)
			/* No more data, exitting normally */
			break;

		/* Read at most one scratch buffer per iteration. */
		fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
		set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
		ret = cfhsi->dev->cfhsi_rx(buffer, fifo_occupancy,
				cfhsi->dev);
		if (ret) {
			clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
			netdev_warn(cfhsi->ndev,
				"%s: can't read data: %d.\n",
				__func__, ret);
			break;
		}

		/* Wait (up to 5 s) for the RX callback to clear the bit. */
		ret = 5 * HZ;
		ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
			 !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);

		if (ret < 0) {
			netdev_warn(cfhsi->ndev,
				"%s: can't wait for flush complete: %d.\n",
				__func__, ret);
			break;
		} else if (!ret) {
			ret = -ETIMEDOUT;
			netdev_warn(cfhsi->ndev,
				"%s: timeout waiting for flush complete.\n",
				__func__);
			break;
		}
	} while (1);

	return ret;
}
230
/*
 * Build one HSI transfer in @desc from the TX queues: optionally a
 * small CAIF frame embedded inside the descriptor itself, plus up to
 * CFHSI_MAX_PKTS payload frames, each preceded by head padding (first
 * pad byte encodes hpad - 1) and followed by tail padding.  Sets or
 * clears the piggy-back flag depending on whether more data should
 * follow.  Returns the total transfer length (descriptor + payload),
 * or 0 when there was nothing to send.
 */
static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int nfrms = 0;
	int pld_len = 0;
	struct sk_buff *skb;
	u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;

	skb = cfhsi_dequeue(cfhsi);
	if (!skb)
		return 0;

	/* Clear offset. */
	desc->offset = 0;

	/* Check if we can embed a CAIF frame. */
	if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
		struct caif_payload_info *info;
		int hpad;
		int tpad;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);

		/* Check if frame still fits with added alignment. */
		if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
			u8 *pemb = desc->emb_frm;
			desc->offset = CFHSI_DESC_SHORT_SZ;
			/* First pad byte encodes the padding length. */
			*pemb = (u8)(hpad - 1);
			pemb += hpad;

			/* Update network statistics. */
			spin_lock_bh(&cfhsi->lock);
			cfhsi->ndev->stats.tx_packets++;
			cfhsi->ndev->stats.tx_bytes += skb->len;
			cfhsi_update_aggregation_stats(cfhsi, skb, -1);
			spin_unlock_bh(&cfhsi->lock);

			/* Copy in embedded CAIF frame. */
			skb_copy_bits(skb, 0, pemb, skb->len);

			/* Consume the SKB */
			consume_skb(skb);
			skb = NULL;
		}
	}

	/* Create payload CAIF frames. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	while (nfrms < CFHSI_MAX_PKTS) {
		struct caif_payload_info *info;
		int hpad;
		int tpad;

		/* skb may still hold a frame too big to embed above. */
		if (!skb)
			skb = cfhsi_dequeue(cfhsi);

		if (!skb)
			break;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);

		/* Fill in CAIF frame length in descriptor. */
		desc->cffrm_len[nfrms] = hpad + skb->len + tpad;

		/* Fill head padding information. */
		*pfrm = (u8)(hpad - 1);
		pfrm += hpad;

		/* Update network statistics. */
		spin_lock_bh(&cfhsi->lock);
		cfhsi->ndev->stats.tx_packets++;
		cfhsi->ndev->stats.tx_bytes += skb->len;
		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
		spin_unlock_bh(&cfhsi->lock);

		/* Copy in CAIF frame. */
		skb_copy_bits(skb, 0, pfrm, skb->len);

		/* Update payload length. */
		pld_len += desc->cffrm_len[nfrms];

		/* Update frame pointer. */
		pfrm += skb->len + tpad;

		/* Consume the SKB */
		consume_skb(skb);
		skb = NULL;

		/* Update number of frames. */
		nfrms++;
	}

	/* Unused length fields should be zero-filled (according to SPEC). */
	while (nfrms < CFHSI_MAX_PKTS) {
		desc->cffrm_len[nfrms] = 0x0000;
		nfrms++;
	}

	/* Check if we can piggy-back another descriptor. */
	if (cfhsi_can_send_aggregate(cfhsi))
		desc->header |= CFHSI_PIGGY_DESC;
	else
		desc->header &= ~CFHSI_PIGGY_DESC;

	return CFHSI_DESC_SZ + pld_len;
}
344
/*
 * Kick the transmitter: build HSI frames with cfhsi_tx_frm() and hand
 * them to the HSI device until the queues are empty, then go idle and
 * arm the inactivity timer.  If packets arrive between the empty
 * check and going idle, loop again (res = -EAGAIN keeps the do/while
 * spinning).
 */
static void cfhsi_start_tx(struct cfhsi *cfhsi)
{
	struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
	int len, res;

	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	do {
		/* Create HSI frame. */
		len = cfhsi_tx_frm(desc, cfhsi);
		if (!len) {
			spin_lock_bh(&cfhsi->lock);
			if (unlikely(cfhsi_tx_queue_len(cfhsi))) {
				spin_unlock_bh(&cfhsi->lock);
				/* Raced with an enqueue: try again. */
				res = -EAGAIN;
				continue;
			}
			cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
			/* Start inactivity timer. */
			mod_timer(&cfhsi->inactivity_timer,
				jiffies + cfhsi->inactivity_timeout);
			spin_unlock_bh(&cfhsi->lock);
			break;
		}

		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0))
			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
				__func__, res);
	} while (res < 0);
}
Dmitry Tarnyaginfe47f122011-10-13 11:29:23 +0000380
/*
 * TX completion handling: possibly re-enable flow from the CAIF stack
 * (flow-on once the queues drain below the low water mark), then
 * either start the next transfer immediately or arm the aggregation
 * timer to batch more low-priority data.
 *
 * Note the asymmetric unlock: the aggregate-send branch drops the
 * lock before calling cfhsi_start_tx(), the timer branch keeps it
 * across mod_timer().
 */
static void cfhsi_tx_done(struct cfhsi *cfhsi)
{
	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/*
	 * Send flow on if flow off has been previously signalled
	 * and number of packets is below low water mark.
	 */
	spin_lock_bh(&cfhsi->lock);
	if (cfhsi->flow_off_sent &&
			cfhsi_tx_queue_len(cfhsi) <= cfhsi->q_low_mark &&
			cfhsi->cfdev.flowctrl) {

		cfhsi->flow_off_sent = 0;
		cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
	}

	if (cfhsi_can_send_aggregate(cfhsi)) {
		spin_unlock_bh(&cfhsi->lock);
		cfhsi_start_tx(cfhsi);
	} else {
		mod_timer(&cfhsi->aggregation_timer,
			jiffies + cfhsi->aggregation_timeout);
		spin_unlock_bh(&cfhsi->lock);
	}

	return;
}
412
413static void cfhsi_tx_done_cb(struct cfhsi_drv *drv)
414{
415 struct cfhsi *cfhsi;
416
417 cfhsi = container_of(drv, struct cfhsi, drv);
Sjur Brændeland90de9bab2012-06-25 07:49:38 +0000418 netdev_dbg(cfhsi->ndev, "%s.\n",
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +0000419 __func__);
420
421 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
422 return;
Daniel Martensson687b13e2011-10-13 11:29:25 +0000423 cfhsi_tx_done(cfhsi);
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +0000424}
425
/*
 * Process a received descriptor: validate it, deliver any embedded
 * CAIF frame up the stack, and compute the length of the payload
 * transfer that follows (including a trailing piggy-backed descriptor
 * if announced).  Returns that transfer size in bytes, -EPROTO on a
 * malformed descriptor, or -ENOMEM if skb allocation fails.
 */
static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int xfer_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	/* Only the piggy-back bit may be set; offset must be in range. */
	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
		netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
			__func__);
		return -EPROTO;
	}

	/* Check for embedded CAIF frame. */
	if (desc->offset) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		int len = 0;
		pfrm = ((u8 *)desc) + desc->offset;

		/* Remove offset padding. */
		pfrm += *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pfrm;
		len |= ((*(pfrm+1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */

		/* Sanity check length of CAIF frame. */
		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
			netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
				__func__);
			return -EPROTO;
		}

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
				__func__);
			return -ENOMEM;
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pfrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We are called from a arch specific platform device.
		 * Unfortunately we don't know what context we're
		 * running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;
	}

	/* Calculate transfer length. */
	plen = desc->cffrm_len;
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		xfer_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Check for piggy-backed descriptor. */
	if (desc->header & CFHSI_PIGGY_DESC)
		xfer_sz += CFHSI_DESC_SZ;

	/* Payload must be word aligned and fit the RX buffer. */
	if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
		netdev_err(cfhsi->ndev,
				"%s: Invalid payload len: %d, ignored.\n",
				__func__, xfer_sz);
		return -EPROTO;
	}
	return xfer_sz;
}
513
sjur.brandeland@stericsson.com332ad432012-02-03 04:36:21 +0000514static int cfhsi_rx_desc_len(struct cfhsi_desc *desc)
515{
516 int xfer_sz = 0;
517 int nfrms = 0;
518 u16 *plen;
519
520 if ((desc->header & ~CFHSI_PIGGY_DESC) ||
521 (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
522
523 pr_err("Invalid descriptor. %x %x\n", desc->header,
524 desc->offset);
525 return -EPROTO;
526 }
527
528 /* Calculate transfer length. */
529 plen = desc->cffrm_len;
530 while (nfrms < CFHSI_MAX_PKTS && *plen) {
531 xfer_sz += *plen;
532 plen++;
533 nfrms++;
534 }
535
536 if (xfer_sz % 4) {
537 pr_err("Invalid payload len: %d, ignored.\n", xfer_sz);
538 return -EPROTO;
539 }
540 return xfer_sz;
541}
542
/*
 * Extract every payload CAIF frame described by @desc, allocate an skb
 * for each and push it up the network stack.  Frames already delivered
 * in an earlier partial pass (after -ENOMEM) are skipped using
 * cfhsi->rx_state.nfrms.  Returns the number of payload bytes
 * consumed, -EPROTO on malformed data, or -ENOMEM when allocation
 * fails mid-way (progress is recorded in rx_state for a retry).
 */
static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int rx_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	/* Sanity check header and offset. */
	if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
		netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
			__func__);
		return -EPROTO;
	}

	/* Set frame pointer to start of payload. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	plen = desc->cffrm_len;

	/* Skip already processed frames. */
	while (nfrms < cfhsi->rx_state.nfrms) {
		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Parse payload. */
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		u8 *pcffrm = NULL;
		int len;

		/* CAIF frame starts after head padding. */
		pcffrm = pfrm + *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pcffrm;
		len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */

		/* Sanity check length of CAIF frames. */
		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
			netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
				__func__);
			return -EPROTO;
		}

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
				__func__);
			/* Remember progress so a retry resumes here. */
			cfhsi->rx_state.nfrms = nfrms;
			return -ENOMEM;
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pcffrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We're called from a platform device,
		 * and don't know the context we're running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;

		/* Advance to the next frame slot (*plen includes padding). */
		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	return rx_sz;
}
630
/*
 * RX completion state machine.  Runs in two alternating states:
 * CFHSI_RX_STATE_DESC (a descriptor just arrived) and
 * CFHSI_RX_STATE_PAYLOAD (a payload, possibly with a piggy-backed
 * descriptor at its tail, just arrived).  It first computes and kicks
 * off the NEXT read into the appropriate buffer (flip buffers are
 * used so parsing the current buffer can overlap the next transfer),
 * then parses the current buffer's contents.  Any malformed data
 * jumps to out_of_sync, which dumps the descriptor and schedules
 * recovery work.
 */
static void cfhsi_rx_done(struct cfhsi *cfhsi)
{
	int res;
	int desc_pld_len = 0, rx_len, rx_state;
	struct cfhsi_desc *desc = NULL;
	u8 *rx_ptr, *rx_buf;
	struct cfhsi_desc *piggy_desc = NULL;

	desc = (struct cfhsi_desc *)cfhsi->rx_buf;

	netdev_dbg(cfhsi->ndev, "%s\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Update inactivity timer if pending. */
	spin_lock_bh(&cfhsi->lock);
	mod_timer_pending(&cfhsi->inactivity_timer,
			jiffies + cfhsi->inactivity_timeout);
	spin_unlock_bh(&cfhsi->lock);

	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
		/* A descriptor arrived: next read is its payload (plus a
		 * trailing descriptor if piggy-backing is flagged). */
		desc_pld_len = cfhsi_rx_desc_len(desc);

		if (desc_pld_len < 0)
			goto out_of_sync;

		rx_buf = cfhsi->rx_buf;
		rx_len = desc_pld_len;
		if (desc_pld_len > 0 && (desc->header & CFHSI_PIGGY_DESC))
			rx_len += CFHSI_DESC_SZ;
		if (desc_pld_len == 0)
			rx_buf = cfhsi->rx_flip_buf;
	} else {
		/* A payload arrived: by default the next read is a fresh
		 * descriptor into the flip buffer. */
		rx_buf = cfhsi->rx_flip_buf;

		rx_len = CFHSI_DESC_SZ;
		if (cfhsi->rx_state.pld_len > 0 &&
				(desc->header & CFHSI_PIGGY_DESC)) {

			/* The piggy-backed descriptor sits right after the
			 * payload in the current buffer. */
			piggy_desc = (struct cfhsi_desc *)
				(desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
						cfhsi->rx_state.pld_len);

			cfhsi->rx_state.piggy_desc = true;

			/* Extract payload len from piggy-backed descriptor. */
			desc_pld_len = cfhsi_rx_desc_len(piggy_desc);
			if (desc_pld_len < 0)
				goto out_of_sync;

			if (desc_pld_len > 0) {
				rx_len = desc_pld_len;
				if (piggy_desc->header & CFHSI_PIGGY_DESC)
					rx_len += CFHSI_DESC_SZ;
			}

			/*
			 * Copy needed information from the piggy-backed
			 * descriptor to the descriptor in the start.
			 */
			memcpy(rx_buf, (u8 *)piggy_desc,
					CFHSI_DESC_SHORT_SZ);
			/* Mark no embedded frame here */
			piggy_desc->offset = 0;
		}
	}

	if (desc_pld_len) {
		rx_state = CFHSI_RX_STATE_PAYLOAD;
		rx_ptr = rx_buf + CFHSI_DESC_SZ;
	} else {
		rx_state = CFHSI_RX_STATE_DESC;
		rx_ptr = rx_buf;
		rx_len = CFHSI_DESC_SZ;
	}

	/* Initiate next read */
	if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
		/* Set up new transfer. */
		netdev_dbg(cfhsi->ndev, "%s: Start RX.\n",
				__func__);

		res = cfhsi->dev->cfhsi_rx(rx_ptr, rx_len,
				cfhsi->dev);
		if (WARN_ON(res < 0)) {
			netdev_err(cfhsi->ndev, "%s: RX error %d.\n",
				__func__, res);
			cfhsi->ndev->stats.rx_errors++;
			cfhsi->ndev->stats.rx_dropped++;
		}
	}

	/* Parse the buffer that just completed. */
	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
		/* Extract payload from descriptor */
		if (cfhsi_rx_desc(desc, cfhsi) < 0)
			goto out_of_sync;
	} else {
		/* Extract payload */
		if (cfhsi_rx_pld(desc, cfhsi) < 0)
			goto out_of_sync;
		if (piggy_desc) {
			/* Extract any payload in piggyback descriptor. */
			if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
				goto out_of_sync;
		}
	}

	/* Update state info */
	memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
	cfhsi->rx_state.state = rx_state;
	cfhsi->rx_ptr = rx_ptr;
	cfhsi->rx_len = rx_len;
	cfhsi->rx_state.pld_len = desc_pld_len;
	cfhsi->rx_state.piggy_desc = desc->header & CFHSI_PIGGY_DESC;

	/* Swap buffers when the next read targets the flip buffer. */
	if (rx_buf != cfhsi->rx_buf)
		swap(cfhsi->rx_buf, cfhsi->rx_flip_buf);
	return;

out_of_sync:
	netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__);
	print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
			cfhsi->rx_buf, CFHSI_DESC_SZ);
	schedule_work(&cfhsi->out_of_sync_work);
}
757
758static void cfhsi_rx_slowpath(unsigned long arg)
759{
760 struct cfhsi *cfhsi = (struct cfhsi *)arg;
761
Sjur Brændeland90de9bab2012-06-25 07:49:38 +0000762 netdev_dbg(cfhsi->ndev, "%s.\n",
Daniel Martensson687b13e2011-10-13 11:29:25 +0000763 __func__);
764
765 cfhsi_rx_done(cfhsi);
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +0000766}
767
768static void cfhsi_rx_done_cb(struct cfhsi_drv *drv)
769{
770 struct cfhsi *cfhsi;
771
772 cfhsi = container_of(drv, struct cfhsi, drv);
Sjur Brændeland90de9bab2012-06-25 07:49:38 +0000773 netdev_dbg(cfhsi->ndev, "%s.\n",
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +0000774 __func__);
775
776 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
777 return;
778
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +0000779 if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
780 wake_up_interruptible(&cfhsi->flush_fifo_wait);
781 else
Daniel Martensson687b13e2011-10-13 11:29:25 +0000782 cfhsi_rx_done(cfhsi);
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +0000783}
784
/*
 * Worker: raise the HSI wake line, wait for the peer's acknowledge and
 * resume RX (and TX if anything is queued). Scheduled either by
 * cfhsi_xmit() (host-initiated wake) or by cfhsi_wake_up_cb()
 * (peer-initiated wake).
 */
static void cfhsi_wake_up(struct work_struct *work)
{
	struct cfhsi *cfhsi = NULL;
	int res;
	int len;
	long ret;

	cfhsi = container_of(work, struct cfhsi, wake_up_work);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
		/* Happens when wakeup is requested by both ends at the
		 * same time: we are already awake, just drop the request. */
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
		return;
	}

	/* Activate wake line. */
	cfhsi->dev->cfhsi_wake_up(cfhsi->dev);

	netdev_dbg(cfhsi->ndev, "%s: Start waiting.\n",
		__func__);

	/* Wait for acknowledge (CFHSI_WAKE_UP_ACK set by cfhsi_wake_up_cb). */
	ret = CFHSI_WAKE_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
					test_and_clear_bit(CFHSI_WAKE_UP_ACK,
							&cfhsi->bits), ret);
	if (unlikely(ret < 0)) {
		/* Interrupted by signal: back out the wake request. */
		netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
			__func__, ret);

		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
		return;
	} else if (!ret) {
		bool ca_wake = false;
		size_t fifo_occupancy = 0;

		/* Wakeup timeout. */
		netdev_dbg(cfhsi->ndev, "%s: Timeout.\n",
			__func__);

		/* Check FIFO to see if the modem has sent something. */
		WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
					&fifo_occupancy));

		netdev_dbg(cfhsi->ndev, "%s: Bytes in FIFO: %u.\n",
			__func__, (unsigned) fifo_occupancy);

		/* Check if we missed the wake-ACK interrupt. */
		WARN_ON(cfhsi->dev->cfhsi_get_peer_wake(cfhsi->dev,
							&ca_wake));

		if (ca_wake) {
			netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
				__func__);

			/* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
			clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

			/* Peer is actually awake: continue as if ACKed. */
			goto wake_ack;
		}

		/* Genuine timeout: abort the wake attempt. */
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
		return;
	}
wake_ack:
	netdev_dbg(cfhsi->ndev, "%s: Woken.\n",
		__func__);

	/* Mark the link awake and drop the pending wake-up request. */
	set_bit(CFHSI_AWAKE, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);

	/* Resume read operation. */
	netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__);
	res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->dev);

	if (WARN_ON(res < 0))
		netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res);

	/* Clear power up acknowledgment. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

	spin_lock_bh(&cfhsi->lock);

	/* Resume transmit if queues are not empty; otherwise arm the
	 * inactivity timer so the link can be powered down again. */
	if (!cfhsi_tx_queue_len(cfhsi)) {
		netdev_dbg(cfhsi->ndev, "%s: Peer wake, start timer.\n",
			__func__);
		/* Start inactivity timer. */
		mod_timer(&cfhsi->inactivity_timer,
				jiffies + cfhsi->inactivity_timeout);
		spin_unlock_bh(&cfhsi->lock);
		return;
	}

	netdev_dbg(cfhsi->ndev, "%s: Host wake.\n",
		__func__);

	spin_unlock_bh(&cfhsi->lock);

	/* Create HSI frame from the queued SKBs. */
	len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);

	if (likely(len > 0)) {
		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		netdev_err(cfhsi->ndev,
				"%s: Failed to create HSI frame: %d.\n",
				__func__, len);
	}
}
911
/*
 * Worker: lower the HSI wake line, wait for the peer's acknowledge,
 * drain the FIFO and cancel RX. Only the host (us) initiates power-down.
 */
static void cfhsi_wake_down(struct work_struct *work)
{
	long ret;
	struct cfhsi *cfhsi = NULL;
	size_t fifo_occupancy = 0;
	int retry = CFHSI_WAKE_TOUT;

	cfhsi = container_of(work, struct cfhsi, wake_down_work);
	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Deactivate wake line. */
	cfhsi->dev->cfhsi_wake_down(cfhsi->dev);

	/* Wait for acknowledge (CFHSI_WAKE_DOWN_ACK from cfhsi_wake_down_cb). */
	ret = CFHSI_WAKE_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
					test_and_clear_bit(CFHSI_WAKE_DOWN_ACK,
							&cfhsi->bits), ret);
	if (ret < 0) {
		/* Interrupted by signal. */
		netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
			__func__, ret);
		return;
	} else if (!ret) {
		bool ca_wake = true;

		/* Timeout waiting for the peer's wake-down ACK. */
		netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__);

		/* Check if we missed the interrupt. */
		WARN_ON(cfhsi->dev->cfhsi_get_peer_wake(cfhsi->dev,
							&ca_wake));
		if (!ca_wake)
			netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
				__func__);
	}

	/* Wait (bounded by 'retry' jiffies) for the FIFO to drain. */
	while (retry) {
		WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
							&fifo_occupancy));

		if (!fifo_occupancy)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);
		retry--;
	}

	if (!retry)
		netdev_err(cfhsi->ndev, "%s: FIFO Timeout.\n", __func__);

	/* Clear AWAKE condition. */
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);

	/* Cancel pending RX requests. */
	cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);

}
975
Daniel Martensson5bbed922011-10-13 11:29:28 +0000976static void cfhsi_out_of_sync(struct work_struct *work)
977{
978 struct cfhsi *cfhsi = NULL;
979
980 cfhsi = container_of(work, struct cfhsi, out_of_sync_work);
981
982 rtnl_lock();
983 dev_close(cfhsi->ndev);
984 rtnl_unlock();
985}
986
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +0000987static void cfhsi_wake_up_cb(struct cfhsi_drv *drv)
988{
989 struct cfhsi *cfhsi = NULL;
990
991 cfhsi = container_of(drv, struct cfhsi, drv);
Sjur Brændeland90de9bab2012-06-25 07:49:38 +0000992 netdev_dbg(cfhsi->ndev, "%s.\n",
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +0000993 __func__);
994
995 set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
996 wake_up_interruptible(&cfhsi->wake_up_wait);
997
998 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
999 return;
1000
1001 /* Schedule wake up work queue if the peer initiates. */
1002 if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
1003 queue_work(cfhsi->wq, &cfhsi->wake_up_work);
1004}
1005
1006static void cfhsi_wake_down_cb(struct cfhsi_drv *drv)
1007{
1008 struct cfhsi *cfhsi = NULL;
1009
1010 cfhsi = container_of(drv, struct cfhsi, drv);
Sjur Brændeland90de9bab2012-06-25 07:49:38 +00001011 netdev_dbg(cfhsi->ndev, "%s.\n",
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001012 __func__);
1013
1014 /* Initiating low power is only permitted by the host (us). */
1015 set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
1016 wake_up_interruptible(&cfhsi->wake_down_wait);
1017}
1018
Dmitry Tarnyaginece367d2012-04-12 08:27:25 +00001019static void cfhsi_aggregation_tout(unsigned long arg)
1020{
1021 struct cfhsi *cfhsi = (struct cfhsi *)arg;
1022
Sjur Brændeland90de9bab2012-06-25 07:49:38 +00001023 netdev_dbg(cfhsi->ndev, "%s.\n",
Dmitry Tarnyaginece367d2012-04-12 08:27:25 +00001024 __func__);
1025
1026 cfhsi_start_tx(cfhsi);
1027}
1028
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001029static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
1030{
1031 struct cfhsi *cfhsi = NULL;
1032 int start_xfer = 0;
1033 int timer_active;
Dmitry Tarnyaginece367d2012-04-12 08:27:25 +00001034 int prio;
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001035
1036 if (!dev)
1037 return -EINVAL;
1038
1039 cfhsi = netdev_priv(dev);
1040
Dmitry Tarnyaginece367d2012-04-12 08:27:25 +00001041 switch (skb->priority) {
1042 case TC_PRIO_BESTEFFORT:
1043 case TC_PRIO_FILLER:
1044 case TC_PRIO_BULK:
1045 prio = CFHSI_PRIO_BEBK;
1046 break;
1047 case TC_PRIO_INTERACTIVE_BULK:
1048 prio = CFHSI_PRIO_VI;
1049 break;
1050 case TC_PRIO_INTERACTIVE:
1051 prio = CFHSI_PRIO_VO;
1052 break;
1053 case TC_PRIO_CONTROL:
1054 default:
1055 prio = CFHSI_PRIO_CTL;
1056 break;
1057 }
1058
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001059 spin_lock_bh(&cfhsi->lock);
1060
Dmitry Tarnyaginece367d2012-04-12 08:27:25 +00001061 /* Update aggregation statistics */
1062 cfhsi_update_aggregation_stats(cfhsi, skb, 1);
1063
1064 /* Queue the SKB */
1065 skb_queue_tail(&cfhsi->qhead[prio], skb);
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001066
1067 /* Sanity check; xmit should not be called after unregister_netdev */
1068 if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
1069 spin_unlock_bh(&cfhsi->lock);
1070 cfhsi_abort_tx(cfhsi);
1071 return -EINVAL;
1072 }
1073
1074 /* Send flow off if number of packets is above high water mark. */
1075 if (!cfhsi->flow_off_sent &&
Dmitry Tarnyaginece367d2012-04-12 08:27:25 +00001076 cfhsi_tx_queue_len(cfhsi) > cfhsi->q_high_mark &&
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001077 cfhsi->cfdev.flowctrl) {
1078 cfhsi->flow_off_sent = 1;
1079 cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
1080 }
1081
1082 if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
1083 cfhsi->tx_state = CFHSI_TX_STATE_XFER;
1084 start_xfer = 1;
1085 }
1086
Dmitry Tarnyagin73033c92011-10-13 11:29:24 +00001087 if (!start_xfer) {
Dmitry Tarnyaginece367d2012-04-12 08:27:25 +00001088 /* Send aggregate if it is possible */
1089 bool aggregate_ready =
1090 cfhsi_can_send_aggregate(cfhsi) &&
1091 del_timer(&cfhsi->aggregation_timer) > 0;
Dmitry Tarnyagin73033c92011-10-13 11:29:24 +00001092 spin_unlock_bh(&cfhsi->lock);
Dmitry Tarnyaginece367d2012-04-12 08:27:25 +00001093 if (aggregate_ready)
1094 cfhsi_start_tx(cfhsi);
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001095 return 0;
Dmitry Tarnyagin73033c92011-10-13 11:29:24 +00001096 }
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001097
1098 /* Delete inactivity timer if started. */
Dmitry Tarnyaginece367d2012-04-12 08:27:25 +00001099 timer_active = del_timer_sync(&cfhsi->inactivity_timer);
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001100
Dmitry Tarnyagin73033c92011-10-13 11:29:24 +00001101 spin_unlock_bh(&cfhsi->lock);
1102
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001103 if (timer_active) {
1104 struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
1105 int len;
1106 int res;
1107
1108 /* Create HSI frame. */
1109 len = cfhsi_tx_frm(desc, cfhsi);
Roar Førdef84ea772011-12-06 12:15:44 +00001110 WARN_ON(!len);
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001111
1112 /* Set up new transfer. */
1113 res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
1114 if (WARN_ON(res < 0)) {
Sjur Brændeland90de9bab2012-06-25 07:49:38 +00001115 netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001116 __func__, res);
1117 cfhsi_abort_tx(cfhsi);
1118 }
1119 } else {
1120 /* Schedule wake up work queue if the we initiate. */
1121 if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
1122 queue_work(cfhsi->wq, &cfhsi->wake_up_work);
1123 }
1124
1125 return 0;
1126}
1127
sjur.brandeland@stericsson.com39abbae2012-04-12 08:27:27 +00001128static const struct net_device_ops cfhsi_ops;
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001129
1130static void cfhsi_setup(struct net_device *dev)
1131{
Dmitry Tarnyaginece367d2012-04-12 08:27:25 +00001132 int i;
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001133 struct cfhsi *cfhsi = netdev_priv(dev);
1134 dev->features = 0;
1135 dev->netdev_ops = &cfhsi_ops;
1136 dev->type = ARPHRD_CAIF;
1137 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
Sjur Brændeland34efc282012-03-04 08:38:58 +00001138 dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001139 dev->tx_queue_len = 0;
1140 dev->destructor = free_netdev;
Dmitry Tarnyaginece367d2012-04-12 08:27:25 +00001141 for (i = 0; i < CFHSI_PRIO_LAST; ++i)
1142 skb_queue_head_init(&cfhsi->qhead[i]);
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001143 cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
1144 cfhsi->cfdev.use_frag = false;
1145 cfhsi->cfdev.use_stx = false;
1146 cfhsi->cfdev.use_fcs = false;
1147 cfhsi->ndev = dev;
1148}
1149
sjur.brandeland@stericsson.com39abbae2012-04-12 08:27:27 +00001150static int cfhsi_open(struct net_device *ndev)
1151{
1152 struct cfhsi *cfhsi = netdev_priv(ndev);
1153 int res;
1154
1155 clear_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
1156
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001157 /* Initialize state vaiables. */
1158 cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
Daniel Martensson687b13e2011-10-13 11:29:25 +00001159 cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001160
1161 /* Set flow info */
1162 cfhsi->flow_off_sent = 0;
1163 cfhsi->q_low_mark = LOW_WATER_MARK;
1164 cfhsi->q_high_mark = HIGH_WATER_MARK;
1165
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001166
1167 /*
1168 * Allocate a TX buffer with the size of a HSI packet descriptors
1169 * and the necessary room for CAIF payload frames.
1170 */
1171 cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
1172 if (!cfhsi->tx_buf) {
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001173 res = -ENODEV;
1174 goto err_alloc_tx;
1175 }
1176
1177 /*
1178 * Allocate a RX buffer with the size of two HSI packet descriptors and
1179 * the necessary room for CAIF payload frames.
1180 */
1181 cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
1182 if (!cfhsi->rx_buf) {
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001183 res = -ENODEV;
1184 goto err_alloc_rx;
1185 }
1186
sjur.brandeland@stericsson.com332ad432012-02-03 04:36:21 +00001187 cfhsi->rx_flip_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
1188 if (!cfhsi->rx_flip_buf) {
1189 res = -ENODEV;
1190 goto err_alloc_rx_flip;
1191 }
1192
Dmitry Tarnyagin28bd2042011-10-13 11:29:27 +00001193 /* Pre-calculate inactivity timeout. */
1194 if (inactivity_timeout != -1) {
1195 cfhsi->inactivity_timeout =
1196 inactivity_timeout * HZ / 1000;
1197 if (!cfhsi->inactivity_timeout)
1198 cfhsi->inactivity_timeout = 1;
1199 else if (cfhsi->inactivity_timeout > NEXT_TIMER_MAX_DELTA)
1200 cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
1201 } else {
1202 cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
1203 }
1204
Dmitry Tarnyaginece367d2012-04-12 08:27:25 +00001205 /* Initialize aggregation timeout */
1206 cfhsi->aggregation_timeout = aggregation_timeout;
1207
Dmitry Tarnyagin28bd2042011-10-13 11:29:27 +00001208 /* Initialize recieve vaiables. */
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001209 cfhsi->rx_ptr = cfhsi->rx_buf;
1210 cfhsi->rx_len = CFHSI_DESC_SZ;
1211
1212 /* Initialize spin locks. */
1213 spin_lock_init(&cfhsi->lock);
1214
1215 /* Set up the driver. */
1216 cfhsi->drv.tx_done_cb = cfhsi_tx_done_cb;
1217 cfhsi->drv.rx_done_cb = cfhsi_rx_done_cb;
sjur.brandeland@stericsson.com94230fe2011-10-13 11:29:22 +00001218 cfhsi->drv.wake_up_cb = cfhsi_wake_up_cb;
1219 cfhsi->drv.wake_down_cb = cfhsi_wake_down_cb;
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001220
1221 /* Initialize the work queues. */
1222 INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
1223 INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
Daniel Martensson5bbed922011-10-13 11:29:28 +00001224 INIT_WORK(&cfhsi->out_of_sync_work, cfhsi_out_of_sync);
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001225
1226 /* Clear all bit fields. */
1227 clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
1228 clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
1229 clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
1230 clear_bit(CFHSI_AWAKE, &cfhsi->bits);
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001231
1232 /* Create work thread. */
sjur.brandeland@stericsson.com39abbae2012-04-12 08:27:27 +00001233 cfhsi->wq = create_singlethread_workqueue(cfhsi->pdev->name);
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001234 if (!cfhsi->wq) {
Sjur Brændeland90de9bab2012-06-25 07:49:38 +00001235 netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n",
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001236 __func__);
1237 res = -ENODEV;
1238 goto err_create_wq;
1239 }
1240
1241 /* Initialize wait queues. */
1242 init_waitqueue_head(&cfhsi->wake_up_wait);
1243 init_waitqueue_head(&cfhsi->wake_down_wait);
1244 init_waitqueue_head(&cfhsi->flush_fifo_wait);
1245
1246 /* Setup the inactivity timer. */
Dmitry Tarnyaginece367d2012-04-12 08:27:25 +00001247 init_timer(&cfhsi->inactivity_timer);
1248 cfhsi->inactivity_timer.data = (unsigned long)cfhsi;
1249 cfhsi->inactivity_timer.function = cfhsi_inactivity_tout;
Daniel Martensson687b13e2011-10-13 11:29:25 +00001250 /* Setup the slowpath RX timer. */
1251 init_timer(&cfhsi->rx_slowpath_timer);
1252 cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi;
1253 cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath;
Dmitry Tarnyaginece367d2012-04-12 08:27:25 +00001254 /* Setup the aggregation timer. */
1255 init_timer(&cfhsi->aggregation_timer);
1256 cfhsi->aggregation_timer.data = (unsigned long)cfhsi;
1257 cfhsi->aggregation_timer.function = cfhsi_aggregation_tout;
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001258
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001259 /* Activate HSI interface. */
1260 res = cfhsi->dev->cfhsi_up(cfhsi->dev);
1261 if (res) {
Sjur Brændeland90de9bab2012-06-25 07:49:38 +00001262 netdev_err(cfhsi->ndev,
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001263 "%s: can't activate HSI interface: %d.\n",
1264 __func__, res);
1265 goto err_activate;
1266 }
1267
1268 /* Flush FIFO */
1269 res = cfhsi_flush_fifo(cfhsi);
1270 if (res) {
Sjur Brændeland90de9bab2012-06-25 07:49:38 +00001271 netdev_err(cfhsi->ndev, "%s: Can't flush FIFO: %d.\n",
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001272 __func__, res);
1273 goto err_net_reg;
1274 }
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001275 return res;
1276
1277 err_net_reg:
1278 cfhsi->dev->cfhsi_down(cfhsi->dev);
1279 err_activate:
1280 destroy_workqueue(cfhsi->wq);
1281 err_create_wq:
sjur.brandeland@stericsson.com332ad432012-02-03 04:36:21 +00001282 kfree(cfhsi->rx_flip_buf);
1283 err_alloc_rx_flip:
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001284 kfree(cfhsi->rx_buf);
1285 err_alloc_rx:
1286 kfree(cfhsi->tx_buf);
1287 err_alloc_tx:
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001288 return res;
1289}
1290
sjur.brandeland@stericsson.com39abbae2012-04-12 08:27:27 +00001291static int cfhsi_close(struct net_device *ndev)
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001292{
sjur.brandeland@stericsson.com39abbae2012-04-12 08:27:27 +00001293 struct cfhsi *cfhsi = netdev_priv(ndev);
sjur.brandeland@stericsson.com5f614e62012-04-12 08:18:08 +00001294 u8 *tx_buf, *rx_buf, *flip_buf;
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001295
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001296 /* going to shutdown driver */
1297 set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
1298
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001299 /* Flush workqueue */
1300 flush_workqueue(cfhsi->wq);
1301
Daniel Martensson687b13e2011-10-13 11:29:25 +00001302 /* Delete timers if pending */
Dmitry Tarnyaginece367d2012-04-12 08:27:25 +00001303 del_timer_sync(&cfhsi->inactivity_timer);
Daniel Martensson687b13e2011-10-13 11:29:25 +00001304 del_timer_sync(&cfhsi->rx_slowpath_timer);
Dmitry Tarnyaginece367d2012-04-12 08:27:25 +00001305 del_timer_sync(&cfhsi->aggregation_timer);
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001306
1307 /* Cancel pending RX request (if any) */
1308 cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
1309
Daniel Martenssonca63f8c2011-10-13 11:29:26 +00001310 /* Destroy workqueue */
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001311 destroy_workqueue(cfhsi->wq);
1312
1313 /* Store bufferes: will be freed later. */
1314 tx_buf = cfhsi->tx_buf;
1315 rx_buf = cfhsi->rx_buf;
sjur.brandeland@stericsson.com5f614e62012-04-12 08:18:08 +00001316 flip_buf = cfhsi->rx_flip_buf;
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001317 /* Flush transmit queues. */
1318 cfhsi_abort_tx(cfhsi);
1319
1320 /* Deactivate interface */
1321 cfhsi->dev->cfhsi_down(cfhsi->dev);
1322
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001323 /* Free buffers. */
1324 kfree(tx_buf);
1325 kfree(rx_buf);
sjur.brandeland@stericsson.com5f614e62012-04-12 08:18:08 +00001326 kfree(flip_buf);
sjur.brandeland@stericsson.com39abbae2012-04-12 08:27:27 +00001327 return 0;
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001328}
1329
Sjur Brændelandc4125402012-06-25 07:49:41 +00001330static void cfhsi_uninit(struct net_device *dev)
1331{
1332 struct cfhsi *cfhsi = netdev_priv(dev);
1333 ASSERT_RTNL();
1334 symbol_put(cfhsi_get_device);
1335 list_del(&cfhsi->list);
1336}
1337
sjur.brandeland@stericsson.com39abbae2012-04-12 08:27:27 +00001338static const struct net_device_ops cfhsi_ops = {
Sjur Brændelandc4125402012-06-25 07:49:41 +00001339 .ndo_uninit = cfhsi_uninit,
sjur.brandeland@stericsson.com39abbae2012-04-12 08:27:27 +00001340 .ndo_open = cfhsi_open,
1341 .ndo_stop = cfhsi_close,
1342 .ndo_start_xmit = cfhsi_xmit
1343};
1344
Sjur Brændelandc4125402012-06-25 07:49:41 +00001345static void cfhsi_netlink_parms(struct nlattr *data[], struct cfhsi *cfhsi)
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001346{
Sjur Brændelandc4125402012-06-25 07:49:41 +00001347 int i;
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001348
Sjur Brændelandc4125402012-06-25 07:49:41 +00001349 if (!data) {
1350 pr_debug("no params data found\n");
1351 return;
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001352 }
Sjur Brændelandc4125402012-06-25 07:49:41 +00001353
1354 i = __IFLA_CAIF_HSI_INACTIVITY_TOUT;
1355 if (data[i])
1356 inactivity_timeout = nla_get_u32(data[i]);
1357
1358 i = __IFLA_CAIF_HSI_AGGREGATION_TOUT;
1359 if (data[i])
1360 aggregation_timeout = nla_get_u32(data[i]);
1361
1362 i = __IFLA_CAIF_HSI_HEAD_ALIGN;
1363 if (data[i])
1364 hsi_head_align = nla_get_u32(data[i]);
1365
1366 i = __IFLA_CAIF_HSI_TAIL_ALIGN;
1367 if (data[i])
1368 hsi_tail_align = nla_get_u32(data[i]);
1369
1370 i = __IFLA_CAIF_HSI_QHIGH_WATERMARK;
1371 if (data[i])
1372 hsi_high_threshold = nla_get_u32(data[i]);
1373}
1374
/* rtnl changelink hook: re-parse parameters and announce the change. */
static int caif_hsi_changelink(struct net_device *dev, struct nlattr *tb[],
				struct nlattr *data[])
{
	struct cfhsi *cfhsi = netdev_priv(dev);

	cfhsi_netlink_parms(data, cfhsi);
	netdev_state_change(dev);
	return 0;
}
1382
/* Netlink attribute policy: all IFLA_CAIF_HSI_* attributes are u32. */
static const struct nla_policy caif_hsi_policy[__IFLA_CAIF_HSI_MAX + 1] = {
	[__IFLA_CAIF_HSI_INACTIVITY_TOUT] = { .type = NLA_U32, .len = 4 },
	[__IFLA_CAIF_HSI_AGGREGATION_TOUT] = { .type = NLA_U32, .len = 4 },
	[__IFLA_CAIF_HSI_HEAD_ALIGN] = { .type = NLA_U32, .len = 4 },
	[__IFLA_CAIF_HSI_TAIL_ALIGN] = { .type = NLA_U32, .len = 4 },
	[__IFLA_CAIF_HSI_QHIGH_WATERMARK] = { .type = NLA_U32, .len = 4 },
	[__IFLA_CAIF_HSI_QLOW_WATERMARK] = { .type = NLA_U32, .len = 4 },
};
1391
1392static size_t caif_hsi_get_size(const struct net_device *dev)
1393{
1394 int i;
1395 size_t s = 0;
1396 for (i = __IFLA_CAIF_HSI_UNSPEC + 1; i < __IFLA_CAIF_HSI_MAX; i++)
1397 s += nla_total_size(caif_hsi_policy[i].len);
1398 return s;
1399}
1400
1401static int caif_hsi_fill_info(struct sk_buff *skb, const struct net_device *dev)
1402{
1403 if (nla_put_u32(skb, __IFLA_CAIF_HSI_INACTIVITY_TOUT,
1404 inactivity_timeout) ||
1405 nla_put_u32(skb, __IFLA_CAIF_HSI_AGGREGATION_TOUT,
1406 aggregation_timeout) ||
1407 nla_put_u32(skb, __IFLA_CAIF_HSI_HEAD_ALIGN, hsi_head_align) ||
1408 nla_put_u32(skb, __IFLA_CAIF_HSI_TAIL_ALIGN, hsi_tail_align) ||
1409 nla_put_u32(skb, __IFLA_CAIF_HSI_QHIGH_WATERMARK,
1410 hsi_high_threshold) ||
1411 nla_put_u32(skb, __IFLA_CAIF_HSI_QLOW_WATERMARK,
1412 hsi_low_threshold))
1413 return -EMSGSIZE;
1414
1415 return 0;
1416}
1417
1418static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
1419 struct nlattr *tb[], struct nlattr *data[])
1420{
1421 struct cfhsi *cfhsi = NULL;
1422 struct platform_device *(*get_dev)(void);
1423
1424 ASSERT_RTNL();
1425
1426 cfhsi = netdev_priv(dev);
1427 cfhsi_netlink_parms(data, cfhsi);
1428 dev_net_set(cfhsi->ndev, src_net);
1429
1430 get_dev = symbol_get(cfhsi_get_device);
1431 if (!get_dev) {
1432 pr_err("%s: failed to get the cfhsi device symbol\n", __func__);
1433 return -ENODEV;
1434 }
1435
1436 /* Assign the HSI device. */
1437 cfhsi->pdev = (*get_dev)();
1438 if (!cfhsi->pdev) {
1439 pr_err("%s: failed to get the cfhsi device\n", __func__);
1440 goto err;
1441 }
1442
1443 /* Assign the HSI device. */
1444 cfhsi->dev = cfhsi->pdev->dev.platform_data;
1445
1446 /* Assign the driver to this HSI device. */
1447 cfhsi->dev->drv = &cfhsi->drv;
1448
1449 if (register_netdevice(dev)) {
1450 pr_warn("%s: device rtml registration failed\n", __func__);
1451 goto err;
1452
1453 }
1454 /* Add CAIF HSI device to list. */
1455 list_add_tail(&cfhsi->list, &cfhsi_list);
1456
1457 return 0;
1458err:
1459 symbol_put(cfhsi_get_device);
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001460 return -ENODEV;
1461}
1462
Sjur Brændelandc4125402012-06-25 07:49:41 +00001463static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
1464 .kind = "cfhsi",
1465 .priv_size = sizeof(struct cfhsi),
1466 .setup = cfhsi_setup,
1467 .maxtype = __IFLA_CAIF_HSI_MAX,
1468 .policy = caif_hsi_policy,
1469 .newlink = caif_hsi_newlink,
1470 .changelink = caif_hsi_changelink,
1471 .get_size = caif_hsi_get_size,
1472 .fill_info = caif_hsi_fill_info,
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001473};
1474
1475static void __exit cfhsi_exit_module(void)
1476{
1477 struct list_head *list_node;
1478 struct list_head *n;
Sjur Brændelandc4125402012-06-25 07:49:41 +00001479 struct cfhsi *cfhsi;
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001480
Sjur Brændelandc4125402012-06-25 07:49:41 +00001481 rtnl_link_unregister(&caif_hsi_link_ops);
1482
1483 rtnl_lock();
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001484 list_for_each_safe(list_node, n, &cfhsi_list) {
1485 cfhsi = list_entry(list_node, struct cfhsi, list);
Sjur Brændelandc4125402012-06-25 07:49:41 +00001486 unregister_netdev(cfhsi->ndev);
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001487 }
Sjur Brændelandc4125402012-06-25 07:49:41 +00001488 rtnl_unlock();
Dmitry.Tarnyagin40d69042011-06-01 03:29:18 +00001489}
1490
/* Module init: register the "cfhsi" rtnl link type; devices are then
 * created through rtnetlink (caif_hsi_newlink). */
static int __init cfhsi_init_module(void)
{
	return rtnl_link_register(&caif_hsi_link_ops);
}

module_init(cfhsi_init_module);
module_exit(cfhsi_exit_module);