/*
 * Copyright (C) ST-Ericsson AB 2010
 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
 * Author:  Daniel Martensson / daniel.martensson@stericsson.com
 *	    Dmitry.Tarnyagin  / dmitry.tarnyagin@stericsson.com
 * License terms: GNU General Public License (GPL) version 2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/if_arp.h>
#include <linux/timer.h>
#include <net/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_hsi.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Martensson <daniel.martensson@stericsson.com>");
MODULE_DESCRIPTION("CAIF HSI driver");

/* Returns the number of padding bytes for alignment. */
#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
			(((pow)-((x)&((pow)-1)))))
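
/*
 * Worked example of the padding arithmetic: with pow == 4, PAD_POW2(5, 4)
 * yields 4 - (5 & 3) == 3 padding bytes, while PAD_POW2(8, 4) yields 0
 * because 8 is already 4-byte aligned.
 */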

static const struct cfhsi_config hsi_default_config = {

	/* Inactivity timeout on HSI, stored in jiffies (HZ == one second).
	 * Configured over netlink in milliseconds. */
	.inactivity_timeout = HZ,

	/* Aggregation timeout (ms) of zero means no aggregation is done */
	.aggregation_timeout = 1,

	/*
	 * HSI link layer flow-control thresholds.
	 * Threshold values for the HSI packet queue. Flow-control will be
	 * asserted when the number of packets exceeds q_high_mark. It will
	 * not be de-asserted before the number of packets drops below
	 * q_low_mark.
	 * Warning: A high threshold value might increase throughput but it
	 * will at the same time prevent channel prioritization and increase
	 * the risk of flooding the modem. The high threshold should be above
	 * the low.
	 */
	.q_high_mark = 100,
	.q_low_mark = 50,

	/*
	 * HSI padding options.
	 * Warning: must be a power of 2 (an & operation is used) and
	 * cannot be zero!
	 */
	.head_align = 4,
	.tail_align = 4,
};

#define ON 1
#define OFF 0

static LIST_HEAD(cfhsi_list);

static void cfhsi_inactivity_tout(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	/* Schedule power down work queue. */
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_down_work);
}

static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
					   const struct sk_buff *skb,
					   int direction)
{
	struct caif_payload_info *info;
	int hpad, tpad, len;

	info = (struct caif_payload_info *)&skb->cb;
	hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
	tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
	len = skb->len + hpad + tpad;

	if (direction > 0)
		cfhsi->aggregation_len += len;
	else if (direction < 0)
		cfhsi->aggregation_len -= len;
}

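/*
 * Decide whether aggregated TX data should be sent now: immediately when
 * aggregation is disabled or any queue above the lowest-priority (BEBK)
 * class holds a frame, otherwise only once the BEBK queue itself has
 * filled up to CFHSI_MAX_PKTS.
 */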
static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
{
	int i;

	if (cfhsi->cfg.aggregation_timeout == 0)
		return true;

	for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
		if (cfhsi->qhead[i].qlen)
			return true;
	}

	/* TODO: Use aggregation_len instead */
	if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS)
		return true;

	return false;
}

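/* Dequeue in strict priority order: the lowest queue index wins. */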
static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < CFHSI_PRIO_LAST; ++i) {
		skb = skb_dequeue(&cfhsi->qhead[i]);
		if (skb)
			break;
	}

	return skb;
}

static int cfhsi_tx_queue_len(struct cfhsi *cfhsi)
{
	int i, len = 0;
	for (i = 0; i < CFHSI_PRIO_LAST; ++i)
		len += skb_queue_len(&cfhsi->qhead[i]);
	return len;
}

static void cfhsi_abort_tx(struct cfhsi *cfhsi)
{
	struct sk_buff *skb;

	for (;;) {
		spin_lock_bh(&cfhsi->lock);
		skb = cfhsi_dequeue(cfhsi);
		if (!skb)
			break;

		cfhsi->ndev->stats.tx_errors++;
		cfhsi->ndev->stats.tx_dropped++;
		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
		spin_unlock_bh(&cfhsi->lock);
		kfree_skb(skb);
	}
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		mod_timer(&cfhsi->inactivity_timer,
			  jiffies + cfhsi->cfg.inactivity_timeout);
	spin_unlock_bh(&cfhsi->lock);
}

static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
{
	char buffer[32]; /* Any reasonable value */
	size_t fifo_occupancy;
	int ret;

	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	do {
		ret = cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
				&fifo_occupancy);
		if (ret) {
			netdev_warn(cfhsi->ndev,
				"%s: can't get FIFO occupancy: %d.\n",
				__func__, ret);
			break;
		} else if (!fifo_occupancy)
			/* No more data, exiting normally */
			break;

		fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
		set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
		ret = cfhsi->ops->cfhsi_rx(buffer, fifo_occupancy,
				cfhsi->ops);
		if (ret) {
			clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
			netdev_warn(cfhsi->ndev,
				"%s: can't read data: %d.\n",
				__func__, ret);
			break;
		}

		ret = 5 * HZ;
		ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
			 !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);

		if (ret < 0) {
			netdev_warn(cfhsi->ndev,
				"%s: can't wait for flush complete: %d.\n",
				__func__, ret);
			break;
		} else if (!ret) {
			ret = -ETIMEDOUT;
			netdev_warn(cfhsi->ndev,
				"%s: timeout waiting for flush complete.\n",
				__func__);
			break;
		}
	} while (1);

	return ret;
}

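/*
 * Build one HSI transfer in the TX buffer: a descriptor that may carry one
 * short CAIF frame embedded in the descriptor itself, followed by up to
 * CFHSI_MAX_PKTS padded payload frames. Returns the total transfer length,
 * or 0 when the TX queues are empty.
 */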
static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int nfrms = 0;
	int pld_len = 0;
	struct sk_buff *skb;
	u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;

	skb = cfhsi_dequeue(cfhsi);
	if (!skb)
		return 0;

	/* Clear offset. */
	desc->offset = 0;

	/* Check if we can embed a CAIF frame. */
	if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
		struct caif_payload_info *info;
		int hpad;
		int tpad;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
		tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);

		/* Check if frame still fits with added alignment. */
		if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
			u8 *pemb = desc->emb_frm;
			desc->offset = CFHSI_DESC_SHORT_SZ;
			*pemb = (u8)(hpad - 1);
			pemb += hpad;

			/* Update network statistics. */
			spin_lock_bh(&cfhsi->lock);
			cfhsi->ndev->stats.tx_packets++;
			cfhsi->ndev->stats.tx_bytes += skb->len;
			cfhsi_update_aggregation_stats(cfhsi, skb, -1);
			spin_unlock_bh(&cfhsi->lock);

			/* Copy in embedded CAIF frame. */
			skb_copy_bits(skb, 0, pemb, skb->len);

			/* Consume the SKB */
			consume_skb(skb);
			skb = NULL;
		}
	}

	/* Create payload CAIF frames. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	while (nfrms < CFHSI_MAX_PKTS) {
		struct caif_payload_info *info;
		int hpad;
		int tpad;

		if (!skb)
			skb = cfhsi_dequeue(cfhsi);

		if (!skb)
			break;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
		tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);

		/* Fill in CAIF frame length in descriptor. */
		desc->cffrm_len[nfrms] = hpad + skb->len + tpad;

		/* Fill head padding information. */
		*pfrm = (u8)(hpad - 1);
		pfrm += hpad;

		/* Update network statistics. */
		spin_lock_bh(&cfhsi->lock);
		cfhsi->ndev->stats.tx_packets++;
		cfhsi->ndev->stats.tx_bytes += skb->len;
		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
		spin_unlock_bh(&cfhsi->lock);

		/* Copy in CAIF frame. */
		skb_copy_bits(skb, 0, pfrm, skb->len);

		/* Update payload length. */
		pld_len += desc->cffrm_len[nfrms];

		/* Update frame pointer. */
		pfrm += skb->len + tpad;

		/* Consume the SKB */
		consume_skb(skb);
		skb = NULL;

		/* Update number of frames. */
		nfrms++;
	}

	/* Unused length fields should be zero-filled (according to SPEC). */
	while (nfrms < CFHSI_MAX_PKTS) {
		desc->cffrm_len[nfrms] = 0x0000;
		nfrms++;
	}

	/* Check if we can piggy-back another descriptor. */
	if (cfhsi_can_send_aggregate(cfhsi))
		desc->header |= CFHSI_PIGGY_DESC;
	else
		desc->header &= ~CFHSI_PIGGY_DESC;

	return CFHSI_DESC_SZ + pld_len;
}

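/*
 * Kick off transmission: loop until a transfer has been handed to the
 * lower layer, or until the queues are drained, in which case the driver
 * goes idle and the inactivity timer is re-armed.
 */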
static void cfhsi_start_tx(struct cfhsi *cfhsi)
{
	struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
	int len, res;

	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	do {
		/* Create HSI frame. */
		len = cfhsi_tx_frm(desc, cfhsi);
		if (!len) {
			spin_lock_bh(&cfhsi->lock);
			if (unlikely(cfhsi_tx_queue_len(cfhsi))) {
				spin_unlock_bh(&cfhsi->lock);
				res = -EAGAIN;
				continue;
			}
			cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
			/* Start inactivity timer. */
			mod_timer(&cfhsi->inactivity_timer,
				jiffies + cfhsi->cfg.inactivity_timeout);
			spin_unlock_bh(&cfhsi->lock);
			break;
		}

		/* Set up new transfer. */
		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
		if (WARN_ON(res < 0))
			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
				__func__, res);
	} while (res < 0);
}

static void cfhsi_tx_done(struct cfhsi *cfhsi)
{
	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/*
	 * Send flow on if flow off has been previously signalled
	 * and number of packets is below low water mark.
	 */
	spin_lock_bh(&cfhsi->lock);
	if (cfhsi->flow_off_sent &&
			cfhsi_tx_queue_len(cfhsi) <= cfhsi->cfg.q_low_mark &&
			cfhsi->cfdev.flowctrl) {

		cfhsi->flow_off_sent = 0;
		cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
	}

	if (cfhsi_can_send_aggregate(cfhsi)) {
		spin_unlock_bh(&cfhsi->lock);
		cfhsi_start_tx(cfhsi);
	} else {
		mod_timer(&cfhsi->aggregation_timer,
			jiffies + cfhsi->cfg.aggregation_timeout);
		spin_unlock_bh(&cfhsi->lock);
	}

	return;
}

static void cfhsi_tx_done_cb(struct cfhsi_cb_ops *cb_ops)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;
	cfhsi_tx_done(cfhsi);
}

static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int xfer_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
		netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
			__func__);
		return -EPROTO;
	}

	/* Check for embedded CAIF frame. */
	if (desc->offset) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		int len = 0;
		pfrm = ((u8 *)desc) + desc->offset;

		/* Remove offset padding. */
		pfrm += *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pfrm;
		len |= ((*(pfrm + 1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */

		/* Sanity check length of CAIF frame. */
		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
			netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
				__func__);
			return -EPROTO;
		}

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
				__func__);
			return -ENOMEM;
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pfrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We are in a callback handler and
		 * unfortunately we don't know what context we're
		 * running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;
	}

	/* Calculate transfer length. */
	plen = desc->cffrm_len;
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		xfer_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Check for piggy-backed descriptor. */
	if (desc->header & CFHSI_PIGGY_DESC)
		xfer_sz += CFHSI_DESC_SZ;

	if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
		netdev_err(cfhsi->ndev,
				"%s: Invalid payload len: %d, ignored.\n",
			__func__, xfer_sz);
		return -EPROTO;
	}
	return xfer_sz;
}

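/*
 * Validate a descriptor and compute the expected transfer length of its
 * payload without extracting any frames (used before the payload has
 * actually been received).
 */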
static int cfhsi_rx_desc_len(struct cfhsi_desc *desc)
{
	int xfer_sz = 0;
	int nfrms = 0;
	u16 *plen;

	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {

		pr_err("Invalid descriptor. %x %x\n", desc->header,
				desc->offset);
		return -EPROTO;
	}

	/* Calculate transfer length. */
	plen = desc->cffrm_len;
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		xfer_sz += *plen;
		plen++;
		nfrms++;
	}

	if (xfer_sz % 4) {
		pr_err("Invalid payload len: %d, ignored.\n", xfer_sz);
		return -EPROTO;
	}
	return xfer_sz;
}

static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int rx_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	/* Sanity check header and offset. */
	if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
		netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
			__func__);
		return -EPROTO;
	}

	/* Set frame pointer to start of payload. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	plen = desc->cffrm_len;

	/* Skip already processed frames. */
	while (nfrms < cfhsi->rx_state.nfrms) {
		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Parse payload. */
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		u8 *pcffrm = NULL;
		int len;

		/* CAIF frame starts after head padding. */
		pcffrm = pfrm + *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pcffrm;
		len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */

		/* Sanity check length of CAIF frames. */
		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
			netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
				__func__);
			return -EPROTO;
		}

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
				__func__);
			cfhsi->rx_state.nfrms = nfrms;
			return -ENOMEM;
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pcffrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We're called in callback from HSI
		 * and don't know the context we're running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;

		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	return rx_sz;
}

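/*
 * RX completion: alternates between receiving a descriptor
 * (CFHSI_RX_STATE_DESC) and receiving its payload. A piggy-backed
 * descriptor at the end of a payload transfer lets the next transfer be
 * started before the current buffer is parsed; rx_buf and rx_flip_buf are
 * swapped so parsing and the ongoing transfer never share a buffer.
 */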
static void cfhsi_rx_done(struct cfhsi *cfhsi)
{
	int res;
	int desc_pld_len = 0, rx_len, rx_state;
	struct cfhsi_desc *desc = NULL;
	u8 *rx_ptr, *rx_buf;
	struct cfhsi_desc *piggy_desc = NULL;

	desc = (struct cfhsi_desc *)cfhsi->rx_buf;

	netdev_dbg(cfhsi->ndev, "%s\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Update inactivity timer if pending. */
	spin_lock_bh(&cfhsi->lock);
	mod_timer_pending(&cfhsi->inactivity_timer,
			jiffies + cfhsi->cfg.inactivity_timeout);
	spin_unlock_bh(&cfhsi->lock);

	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
		desc_pld_len = cfhsi_rx_desc_len(desc);

		if (desc_pld_len < 0)
			goto out_of_sync;

		rx_buf = cfhsi->rx_buf;
		rx_len = desc_pld_len;
		if (desc_pld_len > 0 && (desc->header & CFHSI_PIGGY_DESC))
			rx_len += CFHSI_DESC_SZ;
		if (desc_pld_len == 0)
			rx_buf = cfhsi->rx_flip_buf;
	} else {
		rx_buf = cfhsi->rx_flip_buf;

		rx_len = CFHSI_DESC_SZ;
		if (cfhsi->rx_state.pld_len > 0 &&
				(desc->header & CFHSI_PIGGY_DESC)) {

			piggy_desc = (struct cfhsi_desc *)
				(desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
						cfhsi->rx_state.pld_len);

			cfhsi->rx_state.piggy_desc = true;

			/* Extract payload len from piggy-backed descriptor. */
			desc_pld_len = cfhsi_rx_desc_len(piggy_desc);
			if (desc_pld_len < 0)
				goto out_of_sync;

			if (desc_pld_len > 0) {
				rx_len = desc_pld_len;
				if (piggy_desc->header & CFHSI_PIGGY_DESC)
					rx_len += CFHSI_DESC_SZ;
			}

			/*
			 * Copy needed information from the piggy-backed
			 * descriptor to the descriptor in the start.
			 */
			memcpy(rx_buf, (u8 *)piggy_desc,
					CFHSI_DESC_SHORT_SZ);
		}
	}

	if (desc_pld_len) {
		rx_state = CFHSI_RX_STATE_PAYLOAD;
		rx_ptr = rx_buf + CFHSI_DESC_SZ;
	} else {
		rx_state = CFHSI_RX_STATE_DESC;
		rx_ptr = rx_buf;
		rx_len = CFHSI_DESC_SZ;
	}

	/* Initiate next read */
	if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
		/* Set up new transfer. */
		netdev_dbg(cfhsi->ndev, "%s: Start RX.\n",
				__func__);

		res = cfhsi->ops->cfhsi_rx(rx_ptr, rx_len,
				cfhsi->ops);
		if (WARN_ON(res < 0)) {
			netdev_err(cfhsi->ndev, "%s: RX error %d.\n",
				__func__, res);
			cfhsi->ndev->stats.rx_errors++;
			cfhsi->ndev->stats.rx_dropped++;
		}
	}

	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
		/* Extract payload from descriptor */
		if (cfhsi_rx_desc(desc, cfhsi) < 0)
			goto out_of_sync;
	} else {
		/* Extract payload */
		if (cfhsi_rx_pld(desc, cfhsi) < 0)
			goto out_of_sync;
		if (piggy_desc) {
			/* Extract any payload in piggyback descriptor. */
			if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
				goto out_of_sync;
			/* Mark no embedded frame after extracting it */
			piggy_desc->offset = 0;
		}
	}

	/* Update state info */
	memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
	cfhsi->rx_state.state = rx_state;
	cfhsi->rx_ptr = rx_ptr;
	cfhsi->rx_len = rx_len;
	cfhsi->rx_state.pld_len = desc_pld_len;
	cfhsi->rx_state.piggy_desc = desc->header & CFHSI_PIGGY_DESC;

	if (rx_buf != cfhsi->rx_buf)
		swap(cfhsi->rx_buf, cfhsi->rx_flip_buf);
	return;

out_of_sync:
	netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__);
	print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
			cfhsi->rx_buf, CFHSI_DESC_SZ);
	schedule_work(&cfhsi->out_of_sync_work);
}

static void cfhsi_rx_slowpath(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	cfhsi_rx_done(cfhsi);
}

static void cfhsi_rx_done_cb(struct cfhsi_cb_ops *cb_ops)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
		wake_up_interruptible(&cfhsi->flush_fifo_wait);
	else
		cfhsi_rx_done(cfhsi);
}

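/*
 * Wake-up handshake: assert the wake line and wait for the peer to
 * acknowledge (CFHSI_WAKE_UP_ACK). On timeout, the FIFO and the peer wake
 * line are inspected to detect a missed interrupt before giving up.
 */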
static void cfhsi_wake_up(struct work_struct *work)
{
	struct cfhsi *cfhsi = NULL;
	int res;
	int len;
	long ret;

	cfhsi = container_of(work, struct cfhsi, wake_up_work);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
		/* It happens when wakeup is requested by
		 * both ends at the same time. */
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
		return;
	}

	/* Activate wake line. */
	cfhsi->ops->cfhsi_wake_up(cfhsi->ops);

	netdev_dbg(cfhsi->ndev, "%s: Start waiting.\n",
		__func__);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKE_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
					test_and_clear_bit(CFHSI_WAKE_UP_ACK,
							&cfhsi->bits), ret);
	if (unlikely(ret < 0)) {
		/* Interrupted by signal. */
		netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
			__func__, ret);

		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
		return;
	} else if (!ret) {
		bool ca_wake = false;
		size_t fifo_occupancy = 0;

		/* Wakeup timeout */
		netdev_dbg(cfhsi->ndev, "%s: Timeout.\n",
			__func__);

		/* Check FIFO to check if modem has sent something. */
		WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
					&fifo_occupancy));

		netdev_dbg(cfhsi->ndev, "%s: Bytes in FIFO: %u.\n",
				__func__, (unsigned) fifo_occupancy);

		/* Check if we missed the interrupt. */
		WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
							&ca_wake));

		if (ca_wake) {
			netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
				__func__);

			/* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
			clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

			/* Continue execution. */
			goto wake_ack;
		}

		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
		return;
	}
wake_ack:
	netdev_dbg(cfhsi->ndev, "%s: Woken.\n",
		__func__);

	/* Clear power up bit. */
	set_bit(CFHSI_AWAKE, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);

	/* Resume read operation. */
	netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__);
	res = cfhsi->ops->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->ops);

	if (WARN_ON(res < 0))
		netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res);

	/* Clear power up acknowledgment. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

	spin_lock_bh(&cfhsi->lock);

	/* Resume transmit if queues are not empty. */
	if (!cfhsi_tx_queue_len(cfhsi)) {
		netdev_dbg(cfhsi->ndev, "%s: Peer wake, start timer.\n",
			__func__);
		/* Start inactivity timer. */
		mod_timer(&cfhsi->inactivity_timer,
				jiffies + cfhsi->cfg.inactivity_timeout);
		spin_unlock_bh(&cfhsi->lock);
		return;
	}

	netdev_dbg(cfhsi->ndev, "%s: Host wake.\n",
		__func__);

	spin_unlock_bh(&cfhsi->lock);

	/* Create HSI frame. */
	len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);

	if (likely(len > 0)) {
		/* Set up new transfer. */
		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
		if (WARN_ON(res < 0)) {
			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		netdev_err(cfhsi->ndev,
			"%s: Failed to create HSI frame: %d.\n",
			__func__, len);
	}
}

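/*
 * Power-down path: deassert the wake line, wait for the peer to
 * acknowledge (CFHSI_WAKE_DOWN_ACK), then poll until the FIFO has drained
 * before cancelling outstanding RX requests.
 */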
static void cfhsi_wake_down(struct work_struct *work)
{
	long ret;
	struct cfhsi *cfhsi = NULL;
	size_t fifo_occupancy = 0;
	int retry = CFHSI_WAKE_TOUT;

	cfhsi = container_of(work, struct cfhsi, wake_down_work);
	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Deactivate wake line. */
	cfhsi->ops->cfhsi_wake_down(cfhsi->ops);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKE_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
					test_and_clear_bit(CFHSI_WAKE_DOWN_ACK,
							&cfhsi->bits), ret);
	if (ret < 0) {
		/* Interrupted by signal. */
		netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
			__func__, ret);
		return;
	} else if (!ret) {
		bool ca_wake = true;

		/* Timeout */
		netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__);

		/* Check if we missed the interrupt. */
		WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
							&ca_wake));
		if (!ca_wake)
			netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
				__func__);
	}

	/* Check FIFO occupancy. */
	while (retry) {
		WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
							&fifo_occupancy));

		if (!fifo_occupancy)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);
		retry--;
	}

	if (!retry)
		netdev_err(cfhsi->ndev, "%s: FIFO Timeout.\n", __func__);

	/* Clear AWAKE condition. */
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);

	/* Cancel pending RX requests. */
	cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
}

static void cfhsi_out_of_sync(struct work_struct *work)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(work, struct cfhsi, out_of_sync_work);

	rtnl_lock();
	dev_close(cfhsi->ndev);
	rtnl_unlock();
}

static void cfhsi_wake_up_cb(struct cfhsi_cb_ops *cb_ops)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_up_wait);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Schedule wake up work queue if the peer initiates. */
	if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_up_work);
}

static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	/* Initiating low power is only permitted by the host (us). */
	set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_down_wait);
}

static void cfhsi_aggregation_tout(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	cfhsi_start_tx(cfhsi);
}

static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct cfhsi *cfhsi = NULL;
	int start_xfer = 0;
	int timer_active;
	int prio;

	if (!dev)
		return -EINVAL;

	cfhsi = netdev_priv(dev);

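	/*
	 * Map the skb priority (TC_PRIO_*) onto one of the driver's four
	 * internal queues; unrecognized priorities fall back to the
	 * control class.
	 */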
	switch (skb->priority) {
	case TC_PRIO_BESTEFFORT:
	case TC_PRIO_FILLER:
	case TC_PRIO_BULK:
		prio = CFHSI_PRIO_BEBK;
		break;
	case TC_PRIO_INTERACTIVE_BULK:
		prio = CFHSI_PRIO_VI;
		break;
	case TC_PRIO_INTERACTIVE:
		prio = CFHSI_PRIO_VO;
		break;
	case TC_PRIO_CONTROL:
	default:
		prio = CFHSI_PRIO_CTL;
		break;
	}

	spin_lock_bh(&cfhsi->lock);

	/* Update aggregation statistics */
	cfhsi_update_aggregation_stats(cfhsi, skb, 1);

	/* Queue the SKB */
	skb_queue_tail(&cfhsi->qhead[prio], skb);

	/* Sanity check; xmit should not be called after unregister_netdev */
	if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
		spin_unlock_bh(&cfhsi->lock);
		cfhsi_abort_tx(cfhsi);
		return -EINVAL;
	}

	/* Send flow off if number of packets is above high water mark. */
	if (!cfhsi->flow_off_sent &&
		cfhsi_tx_queue_len(cfhsi) > cfhsi->cfg.q_high_mark &&
		cfhsi->cfdev.flowctrl) {
		cfhsi->flow_off_sent = 1;
		cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
	}

	if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
		cfhsi->tx_state = CFHSI_TX_STATE_XFER;
		start_xfer = 1;
	}

	if (!start_xfer) {
		/* Send aggregate if it is possible */
		bool aggregate_ready =
			cfhsi_can_send_aggregate(cfhsi) &&
			del_timer(&cfhsi->aggregation_timer) > 0;
		spin_unlock_bh(&cfhsi->lock);
		if (aggregate_ready)
			cfhsi_start_tx(cfhsi);
		return 0;
	}

	/* Delete inactivity timer if started. */
	timer_active = del_timer_sync(&cfhsi->inactivity_timer);

	spin_unlock_bh(&cfhsi->lock);

	if (timer_active) {
		struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
		int len;
		int res;

		/* Create HSI frame. */
		len = cfhsi_tx_frm(desc, cfhsi);
		WARN_ON(!len);

		/* Set up new transfer. */
		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
		if (WARN_ON(res < 0)) {
			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		/* Schedule wake up work queue if we initiate. */
		if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
			queue_work(cfhsi->wq, &cfhsi->wake_up_work);
	}

	return 0;
}

static const struct net_device_ops cfhsi_netdevops;

static void cfhsi_setup(struct net_device *dev)
{
	int i;
	struct cfhsi *cfhsi = netdev_priv(dev);
	dev->features = 0;
	dev->type = ARPHRD_CAIF;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
	dev->tx_queue_len = 0;
	dev->destructor = free_netdev;
	dev->netdev_ops = &cfhsi_netdevops;
	for (i = 0; i < CFHSI_PRIO_LAST; ++i)
		skb_queue_head_init(&cfhsi->qhead[i]);
	cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
	cfhsi->cfdev.use_frag = false;
	cfhsi->cfdev.use_stx = false;
	cfhsi->cfdev.use_fcs = false;
	cfhsi->ndev = dev;
}

int cfhsi_probe(struct platform_device *pdev)
{
	struct cfhsi_ops *(*get_ops)(void);
	struct cfhsi *cfhsi = NULL;
	struct net_device *ndev;
	int res;

	ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup);
	if (!ndev)
		return -ENODEV;

	cfhsi = netdev_priv(ndev);
	cfhsi->ndev = ndev;
	cfhsi->pdev = pdev;

	get_ops = symbol_get(cfhsi_get_ops);
	if (!get_ops) {
		pr_err("%s: failed to get the cfhsi_ops\n", __func__);
		free_netdev(ndev);
		return -ENODEV;
	}

	/* Assign the HSI device. */
	cfhsi->ops = (*get_ops)();
	if (!cfhsi->ops) {
		pr_err("%s: failed to get the cfhsi_ops\n", __func__);
		goto err;
	}

	/* Assign the driver to this HSI device. */
	cfhsi->ops->cb_ops = &cfhsi->cb_ops;
	res = register_netdevice(ndev);
	if (res) {
		dev_err(&ndev->dev, "%s: Registration error: %d.\n",
			__func__, res);
		/* Don't add a freed netdev to the device list. */
		free_netdev(ndev);
		symbol_put(cfhsi_get_ops);
		return res;
	}
	/* Add CAIF HSI device to list. */
	list_add_tail(&cfhsi->list, &cfhsi_list);

	return res;
err:
	symbol_put(cfhsi_get_ops);
	free_netdev(ndev);
	return -ENODEV;
}

static int cfhsi_open(struct net_device *ndev)
{
	struct cfhsi *cfhsi = netdev_priv(ndev);
	int res;

	clear_bit(CFHSI_SHUTDOWN, &cfhsi->bits);

	/* Initialize state variables. */
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;

	/* Set flow info */
	cfhsi->flow_off_sent = 0;

	/*
	 * Allocate a TX buffer with the size of an HSI packet descriptor
	 * and the necessary room for CAIF payload frames.
	 */
	cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
	if (!cfhsi->tx_buf) {
		res = -ENODEV;
		goto err_alloc_tx;
	}

	/*
	 * Allocate a RX buffer with the size of two HSI packet descriptors and
	 * the necessary room for CAIF payload frames.
	 */
	cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
	if (!cfhsi->rx_buf) {
		res = -ENODEV;
		goto err_alloc_rx;
	}

	cfhsi->rx_flip_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
	if (!cfhsi->rx_flip_buf) {
		res = -ENODEV;
		goto err_alloc_rx_flip;
	}

	/* Initialize aggregation timeout */
	cfhsi->cfg.aggregation_timeout = hsi_default_config.aggregation_timeout;

	/* Initialize receive variables. */
	cfhsi->rx_ptr = cfhsi->rx_buf;
	cfhsi->rx_len = CFHSI_DESC_SZ;

	/* Initialize spin locks. */
	spin_lock_init(&cfhsi->lock);

	/* Set up the driver. */
	cfhsi->cb_ops.tx_done_cb = cfhsi_tx_done_cb;
	cfhsi->cb_ops.rx_done_cb = cfhsi_rx_done_cb;
	cfhsi->cb_ops.wake_up_cb = cfhsi_wake_up_cb;
	cfhsi->cb_ops.wake_down_cb = cfhsi_wake_down_cb;

	/* Initialize the work queues. */
	INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
	INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
	INIT_WORK(&cfhsi->out_of_sync_work, cfhsi_out_of_sync);

	/* Clear all bit fields. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);

	/* Create work thread. */
	cfhsi->wq = create_singlethread_workqueue(cfhsi->ndev->name);
	if (!cfhsi->wq) {
		netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n",
			__func__);
		res = -ENODEV;
		goto err_create_wq;
	}

	/* Initialize wait queues. */
	init_waitqueue_head(&cfhsi->wake_up_wait);
	init_waitqueue_head(&cfhsi->wake_down_wait);
	init_waitqueue_head(&cfhsi->flush_fifo_wait);

	/* Setup the inactivity timer. */
	init_timer(&cfhsi->inactivity_timer);
	cfhsi->inactivity_timer.data = (unsigned long)cfhsi;
	cfhsi->inactivity_timer.function = cfhsi_inactivity_tout;
	/* Setup the slowpath RX timer. */
	init_timer(&cfhsi->rx_slowpath_timer);
	cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi;
	cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath;
	/* Setup the aggregation timer. */
	init_timer(&cfhsi->aggregation_timer);
	cfhsi->aggregation_timer.data = (unsigned long)cfhsi;
	cfhsi->aggregation_timer.function = cfhsi_aggregation_tout;

	/* Activate HSI interface. */
	res = cfhsi->ops->cfhsi_up(cfhsi->ops);
	if (res) {
		netdev_err(cfhsi->ndev,
			"%s: can't activate HSI interface: %d.\n",
			__func__, res);
		goto err_activate;
	}

	/* Flush FIFO */
	res = cfhsi_flush_fifo(cfhsi);
	if (res) {
		netdev_err(cfhsi->ndev, "%s: Can't flush FIFO: %d.\n",
			__func__, res);
		goto err_net_reg;
	}
	return res;

 err_net_reg:
	cfhsi->ops->cfhsi_down(cfhsi->ops);
 err_activate:
	destroy_workqueue(cfhsi->wq);
 err_create_wq:
	kfree(cfhsi->rx_flip_buf);
 err_alloc_rx_flip:
	kfree(cfhsi->rx_buf);
 err_alloc_rx:
	kfree(cfhsi->tx_buf);
 err_alloc_tx:
	return res;
}

static int cfhsi_close(struct net_device *ndev)
{
	struct cfhsi *cfhsi = netdev_priv(ndev);
	u8 *tx_buf, *rx_buf, *flip_buf;

	/* Going to shut down the driver. */
	set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);

	/* Flush workqueue */
	flush_workqueue(cfhsi->wq);

	/* Delete timers if pending */
	del_timer_sync(&cfhsi->inactivity_timer);
	del_timer_sync(&cfhsi->rx_slowpath_timer);
	del_timer_sync(&cfhsi->aggregation_timer);

	/* Cancel pending RX request (if any) */
	cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);

	/* Destroy workqueue */
	destroy_workqueue(cfhsi->wq);

	/* Store buffers: will be freed later. */
	tx_buf = cfhsi->tx_buf;
	rx_buf = cfhsi->rx_buf;
	flip_buf = cfhsi->rx_flip_buf;
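	/*
	 * The buffers are parked in locals so they can be freed only
	 * after the transmit queues are flushed and the interface is
	 * down, i.e. once nothing below us can still reference them.
	 */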
	/* Flush transmit queues. */
	cfhsi_abort_tx(cfhsi);

	/* Deactivate interface */
	cfhsi->ops->cfhsi_down(cfhsi->ops);

	/* Free buffers. */
	kfree(tx_buf);
	kfree(rx_buf);
	kfree(flip_buf);
	return 0;
}

static void cfhsi_uninit(struct net_device *dev)
{
	struct cfhsi *cfhsi = netdev_priv(dev);

	ASSERT_RTNL();
	symbol_put(cfhsi_get_device);
	list_del(&cfhsi->list);
}

static const struct net_device_ops cfhsi_netdevops = {
	.ndo_uninit = cfhsi_uninit,
	.ndo_open = cfhsi_open,
	.ndo_stop = cfhsi_close,
	.ndo_start_xmit = cfhsi_xmit
};
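/*
 * Lifecycle: cfhsi_open()/cfhsi_close() run when the interface is
 * brought up or down (e.g. "ip link set <dev> up"), while
 * cfhsi_uninit() runs under RTNL as part of device unregistration.
 */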

static void cfhsi_netlink_parms(struct nlattr *data[], struct cfhsi *cfhsi)
{
	int i;

	if (!data) {
		pr_debug("no params data found\n");
		return;
	}

	i = __IFLA_CAIF_HSI_INACTIVITY_TOUT;
	/*
	 * Inactivity timeout in milliseconds. Lowest possible value is 1,
	 * and highest possible is NEXT_TIMER_MAX_DELTA.
	 */
	if (data[i]) {
		u32 inactivity_timeout = nla_get_u32(data[i]);
		/* Pre-calculate inactivity timeout. */
		cfhsi->cfg.inactivity_timeout = inactivity_timeout * HZ / 1000;
		if (cfhsi->cfg.inactivity_timeout == 0)
			cfhsi->cfg.inactivity_timeout = 1;
		else if (cfhsi->cfg.inactivity_timeout > NEXT_TIMER_MAX_DELTA)
			cfhsi->cfg.inactivity_timeout = NEXT_TIMER_MAX_DELTA;
	}
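	/*
	 * Worked example: with HZ == 1000 the millisecond value maps 1:1
	 * onto jiffies; with HZ == 100, 1000 ms becomes 100 jiffies, and
	 * anything under 10 ms truncates to 0 and is clamped up to the
	 * 1-jiffy minimum.
	 */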

	i = __IFLA_CAIF_HSI_AGGREGATION_TOUT;
	if (data[i])
		cfhsi->cfg.aggregation_timeout = nla_get_u32(data[i]);

	i = __IFLA_CAIF_HSI_HEAD_ALIGN;
	if (data[i])
		cfhsi->cfg.head_align = nla_get_u32(data[i]);

	i = __IFLA_CAIF_HSI_TAIL_ALIGN;
	if (data[i])
		cfhsi->cfg.tail_align = nla_get_u32(data[i]);

	i = __IFLA_CAIF_HSI_QHIGH_WATERMARK;
	if (data[i])
		cfhsi->cfg.q_high_mark = nla_get_u32(data[i]);

	i = __IFLA_CAIF_HSI_QLOW_WATERMARK;
	if (data[i])
		cfhsi->cfg.q_low_mark = nla_get_u32(data[i]);
}

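/*
 * caif_hsi_changelink() retunes a live link over rtnl. The attributes
 * parsed above arrive nested in IFLA_INFO_DATA; a hypothetical
 * iproute2 invocation could look like
 *	ip link set cfhsi0 type cfhsi <attribute arguments>
 * (iproute2 support for the "cfhsi" kind and the device name are
 * assumed for illustration only). Note that the inactivity timeout is
 * stored in jiffies after conversion, which is also the unit that
 * caif_hsi_fill_info() reports back.
 */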
static int caif_hsi_changelink(struct net_device *dev, struct nlattr *tb[],
			       struct nlattr *data[])
{
	cfhsi_netlink_parms(data, netdev_priv(dev));
	netdev_state_change(dev);
	return 0;
}

static const struct nla_policy caif_hsi_policy[__IFLA_CAIF_HSI_MAX + 1] = {
	[__IFLA_CAIF_HSI_INACTIVITY_TOUT] = { .type = NLA_U32, .len = 4 },
	[__IFLA_CAIF_HSI_AGGREGATION_TOUT] = { .type = NLA_U32, .len = 4 },
	[__IFLA_CAIF_HSI_HEAD_ALIGN] = { .type = NLA_U32, .len = 4 },
	[__IFLA_CAIF_HSI_TAIL_ALIGN] = { .type = NLA_U32, .len = 4 },
	[__IFLA_CAIF_HSI_QHIGH_WATERMARK] = { .type = NLA_U32, .len = 4 },
	[__IFLA_CAIF_HSI_QLOW_WATERMARK] = { .type = NLA_U32, .len = 4 },
};

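/*
 * All six attributes are fixed-size NLA_U32s, so the size estimate
 * below is just 6 * nla_total_size(4); with 4-byte netlink alignment
 * that works out to 6 * (NLA_HDRLEN + 4) = 48 bytes of attribute
 * payload.
 */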
static size_t caif_hsi_get_size(const struct net_device *dev)
{
	int i;
	size_t s = 0;

	for (i = __IFLA_CAIF_HSI_UNSPEC + 1; i < __IFLA_CAIF_HSI_MAX; i++)
		s += nla_total_size(caif_hsi_policy[i].len);
	return s;
}

static int caif_hsi_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct cfhsi *cfhsi = netdev_priv(dev);

	if (nla_put_u32(skb, __IFLA_CAIF_HSI_INACTIVITY_TOUT,
			cfhsi->cfg.inactivity_timeout) ||
	    nla_put_u32(skb, __IFLA_CAIF_HSI_AGGREGATION_TOUT,
			cfhsi->cfg.aggregation_timeout) ||
	    nla_put_u32(skb, __IFLA_CAIF_HSI_HEAD_ALIGN,
			cfhsi->cfg.head_align) ||
	    nla_put_u32(skb, __IFLA_CAIF_HSI_TAIL_ALIGN,
			cfhsi->cfg.tail_align) ||
	    nla_put_u32(skb, __IFLA_CAIF_HSI_QHIGH_WATERMARK,
			cfhsi->cfg.q_high_mark) ||
	    nla_put_u32(skb, __IFLA_CAIF_HSI_QLOW_WATERMARK,
			cfhsi->cfg.q_low_mark))
		return -EMSGSIZE;

	return 0;
}

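/*
 * By the time newlink runs, the rtnl core has already allocated the
 * net_device with priv_size bytes of private data and run the
 * cfhsi_setup() callback on it, so only configuration and namespace
 * assignment remain. RTNL is held for the whole call, hence the
 * ASSERT_RTNL() below.
 */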
static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
			    struct nlattr *tb[], struct nlattr *data[])
{
	struct cfhsi *cfhsi = NULL;

	ASSERT_RTNL();

	cfhsi = netdev_priv(dev);
	cfhsi_netlink_parms(data, cfhsi);
	dev_net_set(cfhsi->ndev, src_net);

	return 0;
}

static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
	.kind		= "cfhsi",
	.priv_size	= sizeof(struct cfhsi),
	.setup		= cfhsi_setup,
	.maxtype	= __IFLA_CAIF_HSI_MAX,
	.policy		= caif_hsi_policy,
	.newlink	= caif_hsi_newlink,
	.changelink	= caif_hsi_changelink,
	.get_size	= caif_hsi_get_size,
	.fill_info	= caif_hsi_fill_info,
};
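/*
 * Once these ops are registered, links of kind "cfhsi" can be created
 * from userspace, e.g. (illustrative, assuming the tool passes the
 * kind string through):
 *	ip link add name cfhsi0 type cfhsi
 * followed by "ip link set cfhsi0 up", which ends up in cfhsi_open().
 */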

static void __exit cfhsi_exit_module(void)
{
	struct list_head *list_node;
	struct list_head *n;
	struct cfhsi *cfhsi;

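	/*
	 * Unregister the link ops first so no new "cfhsi" links can be
	 * created, then walk the module list and drop any devices still
	 * registered. RTNL is held across the walk, so the lock-free
	 * unregister_netdevice() variant must be used inside it.
	 */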
	rtnl_link_unregister(&caif_hsi_link_ops);

	rtnl_lock();
	list_for_each_safe(list_node, n, &cfhsi_list) {
		cfhsi = list_entry(list_node, struct cfhsi, list);
		unregister_netdevice(cfhsi->ndev);
	}
	rtnl_unlock();
}

static int __init cfhsi_init_module(void)
{
	return rtnl_link_register(&caif_hsi_link_ops);
}

module_init(cfhsi_init_module);
module_exit(cfhsi_exit_module);