/*
 * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Authors: Shlomi Gridish <gridish@freescale.com>
 *          Li Yang <leoli@freescale.com>
 *
 * Description:
 * QE UCC Slow API Set - UCC Slow specific routines implementations.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/interrupt.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/immap_qe.h>
#include <asm/qe.h>

#include <asm/ucc.h>
#include <asm/ucc_slow.h>

#define uccs_printk(level, format, arg...) \
	printk(level format "\n", ## arg)

#define uccs_dbg(format, arg...) \
	uccs_printk(KERN_DEBUG, format, ## arg)
#define uccs_err(format, arg...) \
	uccs_printk(KERN_ERR, format, ## arg)
#define uccs_info(format, arg...) \
	uccs_printk(KERN_INFO, format, ## arg)
#define uccs_warn(format, arg...) \
	uccs_printk(KERN_WARNING, format, ## arg)

#ifdef UCCS_VERBOSE_DEBUG
#define uccs_vdbg uccs_dbg
#else
#define uccs_vdbg(fmt, args...) do { } while (0)
#endif /* UCCS_VERBOSE_DEBUG */

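/* ucc_slow_get_qe_cr_subblock
 * Returns the QE Command Register subblock code for the given slow UCC
 * number (0-7), or QE_CR_SUBBLOCK_INVALID if the number is out of range.
 */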
u32 ucc_slow_get_qe_cr_subblock(int uccs_num)
{
	switch (uccs_num) {
	case 0: return QE_CR_SUBBLOCK_UCCSLOW1;
	case 1: return QE_CR_SUBBLOCK_UCCSLOW2;
	case 2: return QE_CR_SUBBLOCK_UCCSLOW3;
	case 3: return QE_CR_SUBBLOCK_UCCSLOW4;
	case 4: return QE_CR_SUBBLOCK_UCCSLOW5;
	case 5: return QE_CR_SUBBLOCK_UCCSLOW6;
	case 6: return QE_CR_SUBBLOCK_UCCSLOW7;
	case 7: return QE_CR_SUBBLOCK_UCCSLOW8;
	default: return QE_CR_SUBBLOCK_INVALID;
	}
}

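/* ucc_slow_poll_transmitter_now
 * Forces an immediate poll of the transmitter by writing the
 * Transmit-On-Demand bit to the UTODR register.
 */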
void ucc_slow_poll_transmitter_now(struct ucc_slow_private *uccs)
{
	out_be16(&uccs->us_regs->utodr, UCC_SLOW_TOD);
}

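/* ucc_slow_graceful_stop_tx
 * Issues the QE Graceful Stop Tx command, which halts transmission
 * only after any frame currently being transmitted has completed.
 */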
void ucc_slow_graceful_stop_tx(struct ucc_slow_private *uccs)
{
	struct ucc_slow_info *us_info = uccs->us_info;
	u32 id;

	id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
	qe_issue_cmd(QE_GRACEFUL_STOP_TX, id,
		     QE_CR_PROTOCOL_UNSPECIFIED, 0);
}

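/* ucc_slow_stop_tx
 * Issues the QE Stop Tx command, which halts transmission immediately.
 */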
void ucc_slow_stop_tx(struct ucc_slow_private *uccs)
{
	struct ucc_slow_info *us_info = uccs->us_info;
	u32 id;

	id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
	qe_issue_cmd(QE_STOP_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
}

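/* ucc_slow_restart_tx
 * Issues the QE Restart Tx command to resume transmission after a stop.
 */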
void ucc_slow_restart_tx(struct ucc_slow_private *uccs)
{
	struct ucc_slow_info *us_info = uccs->us_info;
	u32 id;

	id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
	qe_issue_cmd(QE_RESTART_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
}

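/* ucc_slow_enable
 * Enables the receiver and/or transmitter of a slow UCC by setting the
 * ENR and/or ENT bits in GUMR_L, according to the requested direction.
 */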
void ucc_slow_enable(struct ucc_slow_private *uccs, enum comm_dir mode)
{
	struct ucc_slow *us_regs;
	u32 gumr_l;

	us_regs = uccs->us_regs;

	/* Enable reception and/or transmission on this UCC. */
	gumr_l = in_be32(&us_regs->gumr_l);
	if (mode & COMM_DIR_TX) {
		gumr_l |= UCC_SLOW_GUMR_L_ENT;
		uccs->enabled_tx = 1;
	}
	if (mode & COMM_DIR_RX) {
		gumr_l |= UCC_SLOW_GUMR_L_ENR;
		uccs->enabled_rx = 1;
	}
	out_be32(&us_regs->gumr_l, gumr_l);
}

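/* ucc_slow_disable
 * Disables the receiver and/or transmitter of a slow UCC by clearing the
 * ENR and/or ENT bits in GUMR_L, according to the requested direction.
 */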
void ucc_slow_disable(struct ucc_slow_private *uccs, enum comm_dir mode)
{
	struct ucc_slow *us_regs;
	u32 gumr_l;

	us_regs = uccs->us_regs;

	/* Disable reception and/or transmission on this UCC. */
	gumr_l = in_be32(&us_regs->gumr_l);
	if (mode & COMM_DIR_TX) {
		gumr_l &= ~UCC_SLOW_GUMR_L_ENT;
		uccs->enabled_tx = 0;
	}
	if (mode & COMM_DIR_RX) {
		gumr_l &= ~UCC_SLOW_GUMR_L_ENR;
		uccs->enabled_rx = 0;
	}
	out_be32(&us_regs->gumr_l, gumr_l);
}

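/* ucc_slow_init
 * Initializes a slow UCC according to the supplied ucc_slow_info:
 * allocates the private data structure, the parameter RAM and the Rx/Tx
 * BD rings in MURAM, programs GUMR_H/GUMR_L and the function code
 * registers, routes the clocks, clears pending events and issues the QE
 * Init command. On success, returns 0 and stores the new structure in
 * *uccs_ret; on failure, frees everything and returns a negative errno.
 */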
int ucc_slow_init(struct ucc_slow_info *us_info, struct ucc_slow_private **uccs_ret)
{
	u32 i;
	struct ucc_slow *us_regs;
	u32 gumr;
	u8 function_code = 0;
	u8 *bd;
	struct ucc_slow_private *uccs;
	u32 id;
	u32 command;
	int ret;

	uccs_vdbg("%s: IN", __func__);

	if (!us_info)
		return -EINVAL;

	/* Check that the UCC port number is in range. */
	if ((us_info->ucc_num < 0) || (us_info->ucc_num > UCC_MAX_NUM - 1)) {
		uccs_err("ucc_slow_init: Illegal UCC number!");
		return -EINVAL;
	}

	/*
	 * Set mrblr
	 * Check that 'max_rx_buf_length' is properly aligned (4), unless
	 * rfw is 1, meaning that QE accepts one byte at a time, unlike normal
	 * case when QE accepts 32 bits at a time.
	 */
	if ((!us_info->rfw) &&
	    (us_info->max_rx_buf_length & (UCC_SLOW_MRBLR_ALIGNMENT - 1))) {
		uccs_err("max_rx_buf_length not aligned.");
		return -EINVAL;
	}

	uccs = kzalloc(sizeof(struct ucc_slow_private), GFP_KERNEL);
	if (!uccs) {
		uccs_err("ucc_slow_init: No memory for UCC slow data structure!");
		return -ENOMEM;
	}

	/* Fill slow UCC structure */
	uccs->us_info = us_info;
	uccs->saved_uccm = 0;
	uccs->p_rx_frame = 0;
	uccs->us_regs = us_info->regs;
	us_regs = uccs->us_regs;
	uccs->p_ucce = (u16 *)&us_regs->ucce;
	uccs->p_uccm = (u16 *)&us_regs->uccm;
#ifdef STATISTICS
	uccs->rx_frames = 0;
	uccs->tx_frames = 0;
	uccs->rx_discarded = 0;
#endif /* STATISTICS */

	/* Get PRAM base */
	uccs->us_pram_offset = qe_muram_alloc(UCC_SLOW_PRAM_SIZE,
					      ALIGNMENT_OF_UCC_SLOW_PRAM);
	if (IS_MURAM_ERR(uccs->us_pram_offset)) {
		uccs_err("ucc_slow_init: Cannot allocate MURAM memory "
			 "for Slow UCC.");
		ucc_slow_free(uccs);
		return -ENOMEM;
	}
	id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
	qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, id, QE_CR_PROTOCOL_UNSPECIFIED,
		     (u32) uccs->us_pram_offset);

	uccs->us_pram = qe_muram_addr(uccs->us_pram_offset);

	/* Init Guemr register */
	if ((ret = ucc_init_guemr((struct ucc_common *)us_info->regs))) {
		uccs_err("ucc_slow_init: Could not init the guemr register.");
		ucc_slow_free(uccs);
		return ret;
	}

	/* Set UCC to slow type */
	if ((ret = ucc_set_type(us_info->ucc_num,
				(struct ucc_common *)us_info->regs,
				UCC_SPEED_TYPE_SLOW))) {
		uccs_err("ucc_slow_init: Could not set the UCC type.");
		ucc_slow_free(uccs);
		return ret;
	}

	out_be16(&uccs->us_pram->mrblr, us_info->max_rx_buf_length);

	INIT_LIST_HEAD(&uccs->confQ);

	/* Allocate BDs. */
	uccs->rx_base_offset =
		qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd),
			       QE_ALIGNMENT_OF_BD);
	if (IS_MURAM_ERR(uccs->rx_base_offset)) {
		uccs_err("ucc_slow_init: No memory for Rx BDs.");
		uccs->rx_base_offset = 0;
		ucc_slow_free(uccs);
		return -ENOMEM;
	}

	uccs->tx_base_offset =
		qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd),
			       QE_ALIGNMENT_OF_BD);
	if (IS_MURAM_ERR(uccs->tx_base_offset)) {
		uccs_err("ucc_slow_init: No memory for Tx BDs.");
		uccs->tx_base_offset = 0;
		ucc_slow_free(uccs);
		return -ENOMEM;
	}

	/* Init Tx BDs */
	bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset);
	for (i = 0; i < us_info->tx_bd_ring_len; i++) {
		/* clear bd buffer */
		out_be32(&(((struct qe_bd *)bd)->buf), 0);
		/* set bd status and length */
		out_be32((u32 *)bd, 0);
		bd += sizeof(struct qe_bd);
	}
	bd -= sizeof(struct qe_bd);
	/* for last BD set Wrap bit */
	out_be32((u32 *)bd, T_W);

	/* Init Rx BDs */
	bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
	for (i = 0; i < us_info->rx_bd_ring_len; i++) {
		/* set bd status and length */
		out_be32((u32 *)bd, 0);
		/* clear bd buffer */
		out_be32(&(((struct qe_bd *)bd)->buf), 0);
		bd += sizeof(struct qe_bd);
	}
	bd -= sizeof(struct qe_bd);
	/* for last BD set Wrap bit */
	out_be32((u32 *)bd, R_W);

	/* Set GUMR (For more details see the hardware spec.). */
	/* gumr_h */
	gumr = 0;
	gumr |= us_info->tcrc;
	if (us_info->cdp)
		gumr |= UCC_SLOW_GUMR_H_CDP;
	if (us_info->ctsp)
		gumr |= UCC_SLOW_GUMR_H_CTSP;
	if (us_info->cds)
		gumr |= UCC_SLOW_GUMR_H_CDS;
	if (us_info->ctss)
		gumr |= UCC_SLOW_GUMR_H_CTSS;
	if (us_info->tfl)
		gumr |= UCC_SLOW_GUMR_H_TFL;
	if (us_info->rfw)
		gumr |= UCC_SLOW_GUMR_H_RFW;
	if (us_info->txsy)
		gumr |= UCC_SLOW_GUMR_H_TXSY;
	if (us_info->rtsm)
		gumr |= UCC_SLOW_GUMR_H_RTSM;
	out_be32(&us_regs->gumr_h, gumr);

	/* gumr_l */
	gumr = 0;
	if (us_info->tci)
		gumr |= UCC_SLOW_GUMR_L_TCI;
	if (us_info->rinv)
		gumr |= UCC_SLOW_GUMR_L_RINV;
	if (us_info->tinv)
		gumr |= UCC_SLOW_GUMR_L_TINV;
	if (us_info->tend)
		gumr |= UCC_SLOW_GUMR_L_TEND;
	gumr |= us_info->tdcr;
	gumr |= us_info->rdcr;
	gumr |= us_info->tenc;
	gumr |= us_info->renc;
	gumr |= us_info->diag;
	gumr |= us_info->mode;
	out_be32(&us_regs->gumr_l, gumr);

	/* Function code registers */
	/* function_code has initial value 0 */

	/* if the data is in cacheable memory, the 'global' bit */
	/* in the function code should be set. */
	function_code |= us_info->data_mem_part;
	function_code |= QE_BMR_BYTE_ORDER_BO_MOT;	/* Required for QE */
	uccs->us_pram->tfcr = function_code;
	uccs->us_pram->rfcr = function_code;

	/* rbase, tbase are offsets from MURAM base */
	out_be16(&uccs->us_pram->rbase, uccs->rx_base_offset);
	out_be16(&uccs->us_pram->tbase, uccs->tx_base_offset);

	/* Mux clocking */
	/* Grant Support */
	ucc_set_qe_mux_grant(us_info->ucc_num, us_info->grant_support);
	/* Breakpoint Support */
	ucc_set_qe_mux_bkpt(us_info->ucc_num, us_info->brkpt_support);
	/* Set TSA or NMSI mode. */
	ucc_set_qe_mux_tsa(us_info->ucc_num, us_info->tsa);
	/* If NMSI (not TSA), set Tx and Rx clock. */
	if (!us_info->tsa) {
		/* Rx clock routing */
		if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->rx_clock,
					COMM_DIR_RX)) {
			uccs_err("ucc_slow_init: Illegal value for parameter"
				 " 'RxClock'.");
			ucc_slow_free(uccs);
			return -EINVAL;
		}
		/* Tx clock routing */
		if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->tx_clock,
					COMM_DIR_TX)) {
			uccs_err("ucc_slow_init: Illegal value for parameter"
				 " 'TxClock'.");
			ucc_slow_free(uccs);
			return -EINVAL;
		}
	}

	/*
	 * INTERRUPTS
	 */
	/* Set interrupt mask register at UCC level. */
	out_be16(&us_regs->uccm, us_info->uccm_mask);

	/* First, clear anything pending at UCC level, otherwise
	 * old garbage may come through as soon as the dam is opened. */

	/* Writing '1' clears */
	out_be16(&us_regs->ucce, 0xffff);

	/* Issue QE Init command */
	if (us_info->init_tx && us_info->init_rx)
		command = QE_INIT_TX_RX;
	else if (us_info->init_tx)
		command = QE_INIT_TX;
	else
		command = QE_INIT_RX;	/* We know at least one is TRUE */
	id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
	qe_issue_cmd(command, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);

	*uccs_ret = uccs;
	return 0;
}

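/* ucc_slow_free
 * Releases all resources held by a slow UCC: the Rx and Tx BD rings and
 * the parameter RAM in MURAM, then the private structure itself.
 */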
void ucc_slow_free(struct ucc_slow_private *uccs)
{
	if (!uccs)
		return;

	if (uccs->rx_base_offset)
		qe_muram_free(uccs->rx_base_offset);

	if (uccs->tx_base_offset)
		qe_muram_free(uccs->tx_base_offset);

	if (uccs->us_pram) {
		qe_muram_free(uccs->us_pram_offset);
		uccs->us_pram = NULL;
	}

	kfree(uccs);
}