/* Freescale QUICC Engine HDLC Device Driver
 *
 * Copyright 2016 Freescale Semiconductor Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/hdlc.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <soc/fsl/qe/qe_tdm.h>
#include <uapi/linux/if_arp.h>

#include "fsl_ucc_hdlc.h"

#define DRV_DESC "Freescale QE UCC HDLC Driver"
#define DRV_NAME "ucc_hdlc"

#define TDM_PPPOHT_SLIC_MAXIN

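/*
 * Default UCC fast and SI configuration used as a template for each HDLC
 * channel.  Defining TDM_PPPOHT_SLIC_MAXIN above selects the larger Rx/Tx
 * frame sync delays in the SI settings below.
 */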
static struct ucc_tdm_info utdm_primary_info = {
	.uf_info = {
		.tsa = 0,
		.cdp = 0,
		.cds = 1,
		.ctsp = 1,
		.ctss = 1,
		.revd = 0,
		.urfs = 256,
		.utfs = 256,
		.urfet = 128,
		.urfset = 192,
		.utfet = 128,
		.utftt = 0x40,
		.ufpt = 256,
		.mode = UCC_FAST_PROTOCOL_MODE_HDLC,
		.ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
		.tenc = UCC_FAST_TX_ENCODING_NRZ,
		.renc = UCC_FAST_RX_ENCODING_NRZ,
		.tcrc = UCC_FAST_16_BIT_CRC,
		.synl = UCC_FAST_SYNC_LEN_NOT_USED,
	},

	.si_info = {
#ifdef TDM_PPPOHT_SLIC_MAXIN
		.simr_rfsd = 1,
		.simr_tfsd = 2,
#else
		.simr_rfsd = 0,
		.simr_tfsd = 0,
#endif
		.simr_crt = 0,
		.simr_sl = 0,
		.simr_ce = 1,
		.simr_fe = 1,
		.simr_gm = 0,
	},
};

static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM];

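/*
 * One-time channel setup: bring up the UCC fast controller, allocate the
 * Rx/Tx buffer descriptor rings and data buffers in DMA-coherent memory,
 * allocate the HDLC parameter RAM in MURAM and program RIPTR/TIPTR, MRBLR,
 * the CRC preset/mask and the address recognition registers.
 */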
static int uhdlc_init(struct ucc_hdlc_private *priv)
{
	struct ucc_tdm_info *ut_info;
	struct ucc_fast_info *uf_info;
	u32 cecr_subblock;
	u16 bd_status;
	int ret, i;
	void *bd_buffer;
	dma_addr_t bd_dma_addr;
	u32 riptr;
	u32 tiptr;
	u32 gumr;

	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;

	if (priv->tsa) {
		uf_info->tsa = 1;
		uf_info->ctsp = 1;
	}
	uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
				UCC_HDLC_UCCE_TXB) << 16);

	ret = ucc_fast_init(uf_info, &priv->uccf);
	if (ret) {
		dev_err(priv->dev, "Failed to init uccf.");
		return ret;
	}

	priv->uf_regs = priv->uccf->uf_regs;
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* Loopback mode */
	if (priv->loopback) {
		dev_info(priv->dev, "Loopback Mode\n");
		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
			 UCC_FAST_GUMR_TCI);
		gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}

	/* Initialize SI */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode (needs fixing) */
	iowrite32be(0, &priv->uf_regs->upsmr);

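	/*
	 * Allocate the Rx/Tx buffer descriptor rings in DMA-coherent memory,
	 * together with the skb pointer arrays the driver uses to track the
	 * frames that currently own each descriptor.
	 */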
	priv->rx_ring_size = RX_BD_RING_LEN;
	priv->tx_ring_size = TX_BD_RING_LEN;
	/* Alloc Rx BD */
	priv->rx_bd_base = dma_alloc_coherent(priv->dev,
					      RX_BD_RING_LEN * sizeof(struct qe_bd),
					      &priv->dma_rx_bd, GFP_KERNEL);

	if (!priv->rx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n");
		ret = -ENOMEM;
		goto free_uccf;
	}

	/* Alloc Tx BD */
	priv->tx_bd_base = dma_alloc_coherent(priv->dev,
					      TX_BD_RING_LEN * sizeof(struct qe_bd),
					      &priv->dma_tx_bd, GFP_KERNEL);

	if (!priv->tx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n");
		ret = -ENOMEM;
		goto free_rx_bd;
	}

	/* Alloc parameter ram for ucc hdlc */
	priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
					       ALIGNMENT_OF_UCC_HDLC_PRAM);

	if (priv->ucc_pram_offset < 0) {
		dev_err(priv->dev, "Cannot allocate MURAM for hdlc parameter.\n");
		ret = -ENOMEM;
		goto free_tx_bd;
	}

	priv->rx_skbuff = kzalloc(priv->rx_ring_size * sizeof(*priv->rx_skbuff),
				  GFP_KERNEL);
	if (!priv->rx_skbuff) {
		ret = -ENOMEM;
		goto free_ucc_pram;
	}

	priv->tx_skbuff = kzalloc(priv->tx_ring_size * sizeof(*priv->tx_skbuff),
				  GFP_KERNEL);
	if (!priv->tx_skbuff) {
		ret = -ENOMEM;
		goto free_rx_skbuff;
	}

	priv->skb_curtx = 0;
	priv->skb_dirtytx = 0;
	priv->curtx_bd = priv->tx_bd_base;
	priv->dirty_tx = priv->tx_bd_base;
	priv->currx_bd = priv->rx_bd_base;
	priv->currx_bdnum = 0;

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
					qe_muram_addr(priv->ucc_pram_offset);

	/* Zero out parameter ram */
	memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));

	/* Alloc riptr, tiptr */
	riptr = qe_muram_alloc(32, 32);
	if (riptr < 0) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_tx_skbuff;
	}

	tiptr = qe_muram_alloc(32, 32);
	if (tiptr < 0) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_riptr;
	}

	/* Set RIPTR, TIPTR */
	iowrite16be(riptr, &priv->ucc_pram->riptr);
	iowrite16be(tiptr, &priv->ucc_pram->tiptr);

	/* Set MRBLR */
	iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);

	/* Set RBASE, TBASE */
	iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
	iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);

	/* Set RSTATE, TSTATE */
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);

	/* Set C_MASK, C_PRES for 16bit CRC */
	iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
	iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);

	iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
	iowrite16be(DEFAULT_ADDR_MASK, &priv->ucc_pram->hmask);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);

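	/*
	 * A single DMA-coherent region backs the data buffers for both rings:
	 * RX_BD_RING_LEN receive buffers followed by TX_BD_RING_LEN transmit
	 * buffers, each MAX_RX_BUF_LENGTH bytes long.
	 */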
	/* Get BD buffer */
	bd_buffer = dma_alloc_coherent(priv->dev,
				       (RX_BD_RING_LEN + TX_BD_RING_LEN) *
				       MAX_RX_BUF_LENGTH,
				       &bd_dma_addr, GFP_KERNEL);

	if (!bd_buffer) {
		dev_err(priv->dev, "Could not allocate buffer descriptors\n");
		ret = -ENOMEM;
		goto free_tiptr;
	}

	memset(bd_buffer, 0, (RX_BD_RING_LEN + TX_BD_RING_LEN)
			* MAX_RX_BUF_LENGTH);

	priv->rx_buffer = bd_buffer;
	priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

	priv->dma_rx_addr = bd_dma_addr;
	priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}

	return 0;

free_tiptr:
	qe_muram_free(tiptr);
free_riptr:
	qe_muram_free(riptr);
free_tx_skbuff:
	kfree(priv->tx_skbuff);
free_rx_skbuff:
	kfree(priv->rx_skbuff);
free_ucc_pram:
	qe_muram_free(priv->ucc_pram_offset);
free_tx_bd:
	dma_free_coherent(priv->dev,
			  TX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->tx_bd_base, priv->dma_tx_bd);
free_rx_bd:
	dma_free_coherent(priv->dev,
			  RX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->rx_bd_base, priv->dma_rx_bd);
free_uccf:
	ucc_fast_free(priv->uccf);

	return ret;
}

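/*
 * Transmit path: validate or prepend the HDLC/PPP header, copy the skb
 * payload into the channel's coherent Tx staging buffer and hand the
 * matching buffer descriptor back to the QE by setting its Ready bit.
 */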
static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
	struct qe_bd __iomem *bd;
	u16 bd_status;
	unsigned long flags;
	u16 *proto_head;

	switch (dev->type) {
	case ARPHRD_RAWHDLC:
		if (skb_headroom(skb) < HDLC_HEAD_LEN) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "Not enough space for hdlc head\n");
			return -ENOMEM;
		}

		skb_push(skb, HDLC_HEAD_LEN);

		proto_head = (u16 *)skb->data;
		*proto_head = htons(DEFAULT_HDLC_HEAD);

		dev->stats.tx_bytes += skb->len;
		break;

	case ARPHRD_PPP:
		proto_head = (u16 *)skb->data;
		if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "Wrong ppp header\n");
			return -ENOMEM;
		}

		dev->stats.tx_bytes += skb->len;
		break;

	default:
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return -ENOMEM;
	}
	spin_lock_irqsave(&priv->lock, flags);

	/* Start from the next BD that should be filled */
	bd = priv->curtx_bd;
	bd_status = ioread16be(&bd->status);
	/* Save the skb pointer so we can free it later */
	priv->tx_skbuff[priv->skb_curtx] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	priv->skb_curtx =
	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

	/* copy skb data to tx buffer for sdma processing */
	memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
	       skb->data, skb->len);

	/* set bd status and length */
	bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;

	iowrite16be(skb->len, &bd->length);
	iowrite16be(bd_status, &bd->status);

	/* Move to next BD in the ring */
	if (!(bd_status & T_W_S))
		bd += 1;
	else
		bd = priv->tx_bd_base;

	if (bd == priv->dirty_tx) {
		if (!netif_queue_stopped(dev))
			netif_stop_queue(dev);
	}

	priv->curtx_bd = bd;

	spin_unlock_irqrestore(&priv->lock, flags);

	return NETDEV_TX_OK;
}

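/*
 * Reclaim completed Tx descriptors: free the transmitted skbs, clear the
 * staging buffers and wake the queue if it had been stopped.
 */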
static int hdlc_tx_done(struct ucc_hdlc_private *priv)
{
	/* Start from the next BD that should be filled */
	struct net_device *dev = priv->ndev;
	struct qe_bd *bd;		/* BD pointer */
	u16 bd_status;

	bd = priv->dirty_tx;
	bd_status = ioread16be(&bd->status);

	/* Normal processing. */
	while ((bd_status & T_R_S) == 0) {
		struct sk_buff *skb;

		/* BD contains already transmitted buffer.   */
		/* Handle the transmitted buffer and release */
		/* the BD to be used with the current frame  */

		skb = priv->tx_skbuff[priv->skb_dirtytx];
		if (!skb)
			break;
		dev->stats.tx_packets++;
		memset(priv->tx_buffer +
		       (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
		       0, skb->len);
		dev_kfree_skb_irq(skb);

		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx =
		    (priv->skb_dirtytx +
		     1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);

		/* Advance the confirmation BD pointer */
		if (!(bd_status & T_W_S))
			bd += 1;
		else
			bd = priv->tx_bd_base;
		bd_status = ioread16be(&bd->status);
	}
	priv->dirty_tx = bd;

	return 0;
}

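/*
 * Receive path (runs from NAPI poll): walk the Rx BD ring, copy each
 * completed frame into a newly allocated skb, strip the HDLC/PPP framing
 * and CRC, pass it up the stack and recycle the descriptor to the QE.
 */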
static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
{
	struct net_device *dev = priv->ndev;
	struct sk_buff *skb = NULL;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct qe_bd *bd;
	u16 bd_status;
	u16 length, howmany = 0;
	u8 *bdbuffer;

	bd = priv->currx_bd;
	bd_status = ioread16be(&bd->status);

	/* while there are received buffers and BD is full (~R_E) */
	while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
		if (bd_status & R_OV_S)
			dev->stats.rx_over_errors++;
		if (bd_status & R_CR_S) {
			dev->stats.rx_crc_errors++;
			dev->stats.rx_dropped++;
			goto recycle;
		}
		bdbuffer = priv->rx_buffer +
			(priv->currx_bdnum * MAX_RX_BUF_LENGTH);
		length = ioread16be(&bd->length);

		switch (dev->type) {
		case ARPHRD_RAWHDLC:
			bdbuffer += HDLC_HEAD_LEN;
			length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}

			skb_put(skb, length);
			skb->len = length;
			skb->dev = dev;
			memcpy(skb->data, bdbuffer, length);
			break;

		case ARPHRD_PPP:
			length -= HDLC_CRC_SIZE;

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}

			skb_put(skb, length);
			skb->len = length;
			skb->dev = dev;
			memcpy(skb->data, bdbuffer, length);
			break;
		}

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		howmany++;
		if (hdlc->proto)
			skb->protocol = hdlc_type_trans(skb, dev);
		netif_receive_skb(skb);

recycle:
		iowrite16be(bd_status | R_E_S | R_I_S, &bd->status);

		/* update to point at the next bd */
		if (bd_status & R_W_S) {
			priv->currx_bdnum = 0;
			bd = priv->rx_bd_base;
		} else {
			if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
				priv->currx_bdnum += 1;
			else
				priv->currx_bdnum = RX_BD_RING_LEN - 1;

			bd += 1;
		}

		bd_status = ioread16be(&bd->status);
	}

	priv->currx_bd = bd;
	return howmany;
}

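/*
 * NAPI poll: run Tx completion housekeeping, receive up to @budget frames
 * and re-enable the Rx/Tx event interrupts once the ring is drained.
 */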
static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct ucc_hdlc_private *priv = container_of(napi,
						     struct ucc_hdlc_private,
						     napi);
	int howmany;

	/* Tx event processing */
	spin_lock(&priv->lock);
	hdlc_tx_done(priv);
	spin_unlock(&priv->lock);

	howmany = 0;
	howmany += hdlc_rx_done(priv, budget - howmany);

	if (howmany < budget) {
		napi_complete_done(napi, howmany);
		qe_setbits32(priv->uccf->p_uccm,
			     (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
	}

	return howmany;
}

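/*
 * Interrupt handler: acknowledge the pending UCC events, hand Rx/Tx work
 * to NAPI (masking those events until the poll completes) and account for
 * busy/underrun error events directly.
 */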
static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
{
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
	struct net_device *dev = priv->ndev;
	struct ucc_fast_private *uccf;
	struct ucc_tdm_info *ut_info;
	u32 ucce;
	u32 uccm;

	ut_info = priv->ut_info;
	uccf = priv->uccf;

	ucce = ioread32be(uccf->p_ucce);
	uccm = ioread32be(uccf->p_uccm);
	ucce &= uccm;
	iowrite32be(ucce, uccf->p_ucce);
	if (!ucce)
		return IRQ_NONE;

	if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
		if (napi_schedule_prep(&priv->napi)) {
			uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
				  << 16);
			iowrite32be(uccm, uccf->p_uccm);
			__napi_schedule(&priv->napi);
		}
	}

	/* Errors and other events */
	if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
		dev->stats.rx_errors++;
	if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
		dev->stats.tx_errors++;

	return IRQ_HANDLED;
}

static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(te1_settings);
	te1_settings line;
	struct ucc_hdlc_private *priv = netdev_priv(dev);

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch (ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_E1;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		memset(&line, 0, sizeof(line));
		line.clock_type = priv->clocking;

		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
			return -EFAULT;
		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}

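/*
 * Bring the interface up: request the UCC interrupt, issue QE_INIT_TX_RX,
 * enable the fast controller and, in TSA mode, the TDM port, then start
 * NAPI and the transmit queue.
 */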
static int uhdlc_open(struct net_device *dev)
{
	u32 cecr_subblock;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = hdlc->priv;
	struct ucc_tdm *utdm = priv->utdm;

	if (priv->hdlc_busy != 1) {
		if (request_irq(priv->ut_info->uf_info.irq,
				ucc_hdlc_irq_handler, 0, "hdlc", priv))
			return -ENODEV;

		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);

		priv->hdlc_busy = 1;
		netif_device_attach(priv->ndev);
		napi_enable(&priv->napi);
		netif_start_queue(dev);
		hdlc_open(dev);
	}

	return 0;
}

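/*
 * Release everything uhdlc_init() set up: the MURAM allocations, BD rings,
 * DMA data buffers, skb bookkeeping arrays and the fast controller itself.
 */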
static void uhdlc_memclean(struct ucc_hdlc_private *priv)
{
	qe_muram_free(priv->ucc_pram->riptr);
	qe_muram_free(priv->ucc_pram->tiptr);

	if (priv->rx_bd_base) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->rx_bd_base, priv->dma_rx_bd);

		priv->rx_bd_base = NULL;
		priv->dma_rx_bd = 0;
	}

	if (priv->tx_bd_base) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->tx_bd_base, priv->dma_tx_bd);

		priv->tx_bd_base = NULL;
		priv->dma_tx_bd = 0;
	}

	if (priv->ucc_pram) {
		qe_muram_free(priv->ucc_pram_offset);
		priv->ucc_pram = NULL;
		priv->ucc_pram_offset = 0;
	}

	kfree(priv->rx_skbuff);
	priv->rx_skbuff = NULL;

	kfree(priv->tx_skbuff);
	priv->tx_skbuff = NULL;

	if (priv->uf_regs) {
		iounmap(priv->uf_regs);
		priv->uf_regs = NULL;
	}

	if (priv->uccf) {
		ucc_fast_free(priv->uccf);
		priv->uccf = NULL;
	}

	if (priv->rx_buffer) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->rx_buffer, priv->dma_rx_addr);
		priv->rx_buffer = NULL;
		priv->dma_rx_addr = 0;
	}

	if (priv->tx_buffer) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->tx_buffer, priv->dma_tx_addr);
		priv->tx_buffer = NULL;
		priv->dma_tx_addr = 0;
	}
}

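/*
 * Bring the interface down: stop NAPI, gracefully stop QE transmission,
 * close the Rx BDs, disable the TDM port and the fast controller, and
 * release the interrupt.
 */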
static int uhdlc_close(struct net_device *dev)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
	struct ucc_tdm *utdm = priv->utdm;
	u32 cecr_subblock;

	napi_disable(&priv->napi);
	cecr_subblock = ucc_fast_get_qe_cr_subblock(
				priv->ut_info->uf_info.ucc_num);

	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
	qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	if (priv->tsa)
		utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	free_irq(priv->ut_info->uf_info.irq, priv);
	netif_stop_queue(dev);
	priv->hdlc_busy = 0;

	return 0;
}

static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
			   unsigned short parity)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;

	if (encoding != ENCODING_NRZ &&
	    encoding != ENCODING_NRZI)
		return -EINVAL;

	if (parity != PARITY_NONE &&
	    parity != PARITY_CRC32_PR1_CCITT &&
	    parity != PARITY_CRC16_PR1_CCITT)
		return -EINVAL;

	priv->encoding = encoding;
	priv->parity = parity;

	return 0;
}

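/*
 * Power management: suspend saves the SI/UCC clock routing, GUMR/GUEMR and
 * a copy of the HDLC parameter RAM; resume reprograms the virtual FIFOs and
 * clock routing, restores the parameter RAM and rebuilds both BD rings
 * before re-enabling the controller.
 */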
#ifdef CONFIG_PM
static void store_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	/* store si clk */
	priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
	priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);

	/* store si sync */
	priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);

	/* store ucc clk */
	memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
}

static void resume_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));

	iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
	iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);

	iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
}

static int uhdlc_suspend(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	netif_device_detach(priv->ndev);
	napi_disable(&priv->napi);

	ut_info = priv->ut_info;
	uf_regs = priv->uf_regs;

	/* backup gumr guemr */
	priv->gumr = ioread32be(&uf_regs->gumr);
	priv->guemr = ioread8(&uf_regs->guemr);

	priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
				     GFP_KERNEL);
	if (!priv->ucc_pram_bak)
		return -ENOMEM;

	/* backup HDLC parameter */
	memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
		      sizeof(struct ucc_hdlc_param));

	/* store the clk configuration */
	store_clk_config(priv);

	/* save power */
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	return 0;
}

static int uhdlc_resume(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm *utdm;
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;
	struct ucc_fast_private *uccf;
	struct ucc_fast_info *uf_info;
	int ret, i;
	u32 cecr_subblock;
	u16 bd_status;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	utdm = priv->utdm;
	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;
	uf_regs = priv->uf_regs;
	uccf = priv->uccf;

	/* restore gumr guemr */
	iowrite8(priv->guemr, &uf_regs->guemr);
	iowrite32be(priv->gumr, &uf_regs->gumr);

	/* Set Virtual Fifo registers */
	iowrite16be(uf_info->urfs, &uf_regs->urfs);
	iowrite16be(uf_info->urfet, &uf_regs->urfet);
	iowrite16be(uf_info->urfset, &uf_regs->urfset);
	iowrite16be(uf_info->utfs, &uf_regs->utfs);
	iowrite16be(uf_info->utfet, &uf_regs->utfet);
	iowrite16be(uf_info->utftt, &uf_regs->utftt);
	/* utfb, urfb are offsets from MURAM base */
	iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
	iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);

	/* Rx Tx and sync clock routing */
	resume_clk_config(priv);

	iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
	iowrite32be(0xffffffff, &uf_regs->ucce);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* rebuild SIRAM */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode */
	iowrite32be(0, &uf_regs->upsmr);

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
				qe_muram_addr(priv->ucc_pram_offset);

	/* restore ucc parameter */
	memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
		    sizeof(struct ucc_hdlc_param));
	kfree(priv->ucc_pram_bak);

	/* rebuild BD entry */
	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}

	/* if hdlc is busy enable TX and RX */
	if (priv->hdlc_busy == 1) {
		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
	}

	napi_enable(&priv->napi);
	netif_device_attach(priv->ndev);

	return 0;
}

static const struct dev_pm_ops uhdlc_pm_ops = {
	.suspend = uhdlc_suspend,
	.resume = uhdlc_resume,
	.freeze = uhdlc_suspend,
	.thaw = uhdlc_resume,
};

#define HDLC_PM_OPS (&uhdlc_pm_ops)

#else

#define HDLC_PM_OPS NULL

#endif

static const struct net_device_ops uhdlc_ops = {
	.ndo_open	= uhdlc_open,
	.ndo_stop	= uhdlc_close,
	.ndo_start_xmit	= hdlc_start_xmit,
	.ndo_do_ioctl	= uhdlc_ioctl,
};

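/*
 * Platform probe: parse the device tree node (UCC number, Rx/Tx clock
 * routing, TSA and loopback options), initialize the channel via
 * uhdlc_init() and register the resulting HDLC network device.
 */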
static int ucc_hdlc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ucc_hdlc_private *uhdlc_priv = NULL;
	struct ucc_tdm_info *ut_info;
	struct ucc_tdm *utdm = NULL;
	struct resource res;
	struct net_device *dev;
	hdlc_device *hdlc;
	int ucc_num;
	const char *sprop;
	int ret;
	u32 val;

	ret = of_property_read_u32_index(np, "cell-index", 0, &val);
	if (ret) {
		dev_err(&pdev->dev, "Invalid ucc property\n");
		return -ENODEV;
	}

	ucc_num = val - 1;
	if ((ucc_num > 3) || (ucc_num < 0)) {
		dev_err(&pdev->dev, "Invalid UCC num\n");
		return -EINVAL;
	}

	memcpy(&utdm_info[ucc_num], &utdm_primary_info,
	       sizeof(utdm_primary_info));

	ut_info = &utdm_info[ucc_num];
	ut_info->uf_info.ucc_num = ucc_num;

	sprop = of_get_property(np, "rx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.rx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.rx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
		return -EINVAL;
	}

	sprop = of_get_property(np, "tx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.tx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.tx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
		return -EINVAL;
	}

	/* use the same clock when working in loopback */
	if (ut_info->uf_info.rx_clock == ut_info->uf_info.tx_clock)
		qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		return -EINVAL;

	ut_info->uf_info.regs = res.start;
	ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);

	uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
	if (!uhdlc_priv)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, uhdlc_priv);
	uhdlc_priv->dev = &pdev->dev;
	uhdlc_priv->ut_info = ut_info;

	if (of_get_property(np, "fsl,tdm-interface", NULL))
		uhdlc_priv->tsa = 1;

	if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
		uhdlc_priv->loopback = 1;

	if (uhdlc_priv->tsa == 1) {
		utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
		if (!utdm) {
			ret = -ENOMEM;
			dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n");
			goto free_uhdlc_priv;
		}
		uhdlc_priv->utdm = utdm;
		ret = ucc_of_parse_tdm(np, utdm, ut_info);
		if (ret)
			goto free_utdm;
	}

	ret = uhdlc_init(uhdlc_priv);
	if (ret) {
		dev_err(&pdev->dev, "Failed to init uhdlc\n");
		goto free_utdm;
	}

	dev = alloc_hdlcdev(uhdlc_priv);
	if (!dev) {
		ret = -ENOMEM;
		pr_err("ucc_hdlc: unable to allocate memory\n");
		goto undo_uhdlc_init;
	}

	uhdlc_priv->ndev = dev;
	hdlc = dev_to_hdlc(dev);
	dev->tx_queue_len = 16;
	dev->netdev_ops = &uhdlc_ops;
	hdlc->attach = ucc_hdlc_attach;
	hdlc->xmit = ucc_hdlc_tx;
	netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
	if (register_hdlc_device(dev)) {
		ret = -ENOBUFS;
		pr_err("ucc_hdlc: unable to register hdlc device\n");
		goto free_dev;
	}

	return 0;

free_dev:
	free_netdev(dev);
undo_uhdlc_init:
free_utdm:
	if (uhdlc_priv->tsa)
		kfree(utdm);
free_uhdlc_priv:
	kfree(uhdlc_priv);
	return ret;
}

static int ucc_hdlc_remove(struct platform_device *pdev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);

	uhdlc_memclean(priv);

	if (priv->utdm->si_regs) {
		iounmap(priv->utdm->si_regs);
		priv->utdm->si_regs = NULL;
	}

	if (priv->utdm->siram) {
		iounmap(priv->utdm->siram);
		priv->utdm->siram = NULL;
	}
	kfree(priv);

	dev_info(&pdev->dev, "UCC based hdlc module removed\n");

	return 0;
}

static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
	{
	.compatible = "fsl,ucc-hdlc",
	},
	{},
};

MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);

static struct platform_driver ucc_hdlc_driver = {
	.probe	= ucc_hdlc_probe,
	.remove	= ucc_hdlc_remove,
	.driver	= {
		.name		= DRV_NAME,
		.pm		= HDLC_PM_OPS,
		.of_match_table	= fsl_ucc_hdlc_of_match,
	},
};

module_platform_driver(ucc_hdlc_driver);
MODULE_LICENSE("GPL");