1/*
2 * Copyright (c) 2010 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 *
16 * File contents: support functions for PCI/PCIe
17 */
18
19#include <linux/delay.h>
20#include <linux/pci.h>
21
22#include <defs.h>
23#include <chipcommon.h>
24#include <brcmu_utils.h>
25#include <brcm_hw_ids.h>
26#include <soc.h>
27#include "types.h"
28#include "pub.h"
29#include "pmu.h"
30#include "srom.h"
31#include "nicpci.h"
32#include "aiutils.h"
33
34/* slow_clk_ctl */
35 /* slow clock source mask */
36#define SCC_SS_MASK 0x00000007
37 /* source of slow clock is LPO */
38#define SCC_SS_LPO 0x00000000
39 /* source of slow clock is crystal */
40#define SCC_SS_XTAL 0x00000001
41 /* source of slow clock is PCI */
42#define SCC_SS_PCI 0x00000002
 43 /* LPOFreqSel, 1: 160 kHz, 0: 32 kHz */
44#define SCC_LF 0x00000200
45 /* LPOPowerDown, 1: LPO is disabled, 0: LPO is enabled */
46#define SCC_LP 0x00000400
47 /* ForceSlowClk, 1: sb/cores running on slow clock, 0: power logic control */
48#define SCC_FS 0x00000800
49 /* IgnorePllOffReq, 1/0:
50 * power logic ignores/honors PLL clock disable requests from core
51 */
52#define SCC_IP 0x00001000
53 /* XtalControlEn, 1/0:
54 * power logic does/doesn't disable crystal when appropriate
55 */
56#define SCC_XC 0x00002000
57 /* XtalPU (RO), 1/0: crystal running/disabled */
58#define SCC_XP 0x00004000
59 /* ClockDivider (SlowClk = 1/(4+divisor)) */
60#define SCC_CD_MASK 0xffff0000
61#define SCC_CD_SHIFT 16
62
63/* system_clk_ctl */
64 /* ILPen: Enable Idle Low Power */
65#define SYCC_IE 0x00000001
66 /* ALPen: Enable Active Low Power */
67#define SYCC_AE 0x00000002
68 /* ForcePLLOn */
69#define SYCC_FP 0x00000004
 70 /* Force ALP (or HT if ALPen is not set) */
71#define SYCC_AR 0x00000008
72 /* Force HT */
73#define SYCC_HR 0x00000010
 74 /* ClkDiv (ILP = 1/(4 * (divisor + 1))) */
75#define SYCC_CD_MASK 0xffff0000
76#define SYCC_CD_SHIFT 16
77
78#define CST4329_SPROM_OTP_SEL_MASK 0x00000003
79 /* OTP is powered up, use def. CIS, no SPROM */
80#define CST4329_DEFCIS_SEL 0
81 /* OTP is powered up, SPROM is present */
82#define CST4329_SPROM_SEL 1
83 /* OTP is powered up, no SPROM */
84#define CST4329_OTP_SEL 2
85 /* OTP is powered down, SPROM is present */
86#define CST4329_OTP_PWRDN 3
87
88#define CST4329_SPI_SDIO_MODE_MASK 0x00000004
89#define CST4329_SPI_SDIO_MODE_SHIFT 2
90
91/* 43224 chip-specific ChipControl register bits */
92#define CCTRL43224_GPIO_TOGGLE 0x8000
93 /* 12 mA drive strength */
94#define CCTRL_43224A0_12MA_LED_DRIVE 0x00F000F0
95 /* 12 mA drive strength for later 43224s */
96#define CCTRL_43224B0_12MA_LED_DRIVE 0xF0
97
98/* 43236 Chip specific ChipStatus register bits */
99#define CST43236_SFLASH_MASK 0x00000040
100#define CST43236_OTP_MASK 0x00000080
101#define CST43236_HSIC_MASK 0x00000100 /* USB/HSIC */
102#define CST43236_BP_CLK 0x00000200 /* 120/96Mbps */
103#define CST43236_BOOT_MASK 0x00001800
104#define CST43236_BOOT_SHIFT 11
105#define CST43236_BOOT_FROM_SRAM 0 /* boot from SRAM, ARM in reset */
106#define CST43236_BOOT_FROM_ROM 1 /* boot from ROM */
107#define CST43236_BOOT_FROM_FLASH 2 /* boot from FLASH */
108#define CST43236_BOOT_FROM_INVALID 3
109
110/* 4331 chip-specific ChipControl register bits */
111 /* 0 disable */
112#define CCTRL4331_BT_COEXIST (1<<0)
113 /* 0 SECI is disabled (JTAG functional) */
114#define CCTRL4331_SECI (1<<1)
115 /* 0 disable */
116#define CCTRL4331_EXT_LNA (1<<2)
117 /* sprom/gpio13-15 mux */
118#define CCTRL4331_SPROM_GPIO13_15 (1<<3)
119 /* 0 ext pa disable, 1 ext pa enabled */
120#define CCTRL4331_EXTPA_EN (1<<4)
121 /* set drive out GPIO_CLK on sprom_cs pin */
122#define CCTRL4331_GPIOCLK_ON_SPROMCS (1<<5)
123 /* use sprom_cs pin as PCIE mdio interface */
124#define CCTRL4331_PCIE_MDIO_ON_SPROMCS (1<<6)
125 /* aband extpa will be at gpio2/5 and sprom_dout */
126#define CCTRL4331_EXTPA_ON_GPIO2_5 (1<<7)
127 /* override core control on pipe_AuxClkEnable */
128#define CCTRL4331_OVR_PIPEAUXCLKEN (1<<8)
129 /* override core control on pipe_AuxPowerDown */
130#define CCTRL4331_OVR_PIPEAUXPWRDOWN (1<<9)
131 /* pcie_auxclkenable */
132#define CCTRL4331_PCIE_AUXCLKEN (1<<10)
133 /* pcie_pipe_pllpowerdown */
134#define CCTRL4331_PCIE_PIPE_PLLDOWN (1<<11)
135 /* enable bt_shd0 at gpio4 */
136#define CCTRL4331_BT_SHD0_ON_GPIO4 (1<<16)
137 /* enable bt_shd1 at gpio5 */
138#define CCTRL4331_BT_SHD1_ON_GPIO5 (1<<17)
139
140/* 4331 Chip specific ChipStatus register bits */
 141 /* crystal frequency 20/40 MHz */
142#define CST4331_XTAL_FREQ 0x00000001
143#define CST4331_SPROM_PRESENT 0x00000002
144#define CST4331_OTP_PRESENT 0x00000004
145#define CST4331_LDO_RF 0x00000008
146#define CST4331_LDO_PAR 0x00000010
147
148/* 4319 chip-specific ChipStatus register bits */
149#define CST4319_SPI_CPULESSUSB 0x00000001
150#define CST4319_SPI_CLK_POL 0x00000002
151#define CST4319_SPI_CLK_PH 0x00000008
152 /* gpio [7:6], SDIO CIS selection */
153#define CST4319_SPROM_OTP_SEL_MASK 0x000000c0
154#define CST4319_SPROM_OTP_SEL_SHIFT 6
155 /* use default CIS, OTP is powered up */
156#define CST4319_DEFCIS_SEL 0x00000000
157 /* use SPROM, OTP is powered up */
158#define CST4319_SPROM_SEL 0x00000040
159 /* use OTP, OTP is powered up */
160#define CST4319_OTP_SEL 0x00000080
161 /* use SPROM, OTP is powered down */
162#define CST4319_OTP_PWRDN 0x000000c0
163 /* gpio [8], sdio/usb mode */
164#define CST4319_SDIO_USB_MODE 0x00000100
165#define CST4319_REMAP_SEL_MASK 0x00000600
166#define CST4319_ILPDIV_EN 0x00000800
167#define CST4319_XTAL_PD_POL 0x00001000
168#define CST4319_LPO_SEL 0x00002000
169#define CST4319_RES_INIT_MODE 0x0000c000
170 /* PALDO is configured with external PNP */
171#define CST4319_PALDO_EXTPNP 0x00010000
172#define CST4319_CBUCK_MODE_MASK 0x00060000
173#define CST4319_CBUCK_MODE_BURST 0x00020000
174#define CST4319_CBUCK_MODE_LPBURST 0x00060000
175#define CST4319_RCAL_VALID 0x01000000
176#define CST4319_RCAL_VALUE_MASK 0x3e000000
177#define CST4319_RCAL_VALUE_SHIFT 25
178
179/* 4336 chip-specific ChipStatus register bits */
180#define CST4336_SPI_MODE_MASK 0x00000001
181#define CST4336_SPROM_PRESENT 0x00000002
182#define CST4336_OTP_PRESENT 0x00000004
183#define CST4336_ARMREMAP_0 0x00000008
184#define CST4336_ILPDIV_EN_MASK 0x00000010
185#define CST4336_ILPDIV_EN_SHIFT 4
186#define CST4336_XTAL_PD_POL_MASK 0x00000020
187#define CST4336_XTAL_PD_POL_SHIFT 5
188#define CST4336_LPO_SEL_MASK 0x00000040
189#define CST4336_LPO_SEL_SHIFT 6
190#define CST4336_RES_INIT_MODE_MASK 0x00000180
191#define CST4336_RES_INIT_MODE_SHIFT 7
192#define CST4336_CBUCK_MODE_MASK 0x00000600
193#define CST4336_CBUCK_MODE_SHIFT 9
194
195/* 4313 chip-specific ChipStatus register bits */
196#define CST4313_SPROM_PRESENT 1
197#define CST4313_OTP_PRESENT 2
198#define CST4313_SPROM_OTP_SEL_MASK 0x00000002
199#define CST4313_SPROM_OTP_SEL_SHIFT 0
200
201/* 4313 Chip specific ChipControl register bits */
 202 /* 12 mA drive strength for later 4313 */
203#define CCTRL_4313_12MA_LED_DRIVE 0x00000007
204
205/* Manufacturer Ids */
206#define MFGID_ARM 0x43b
207#define MFGID_BRCM 0x4bf
208#define MFGID_MIPS 0x4a7
209
210/* Enumeration ROM registers */
211#define ER_EROMENTRY 0x000
212#define ER_REMAPCONTROL 0xe00
213#define ER_REMAPSELECT 0xe04
214#define ER_MASTERSELECT 0xe10
215#define ER_ITCR 0xf00
216#define ER_ITIP 0xf04
217
218/* Erom entries */
219#define ER_TAG 0xe
220#define ER_TAG1 0x6
221#define ER_VALID 1
222#define ER_CI 0
223#define ER_MP 2
224#define ER_ADD 4
225#define ER_END 0xe
226#define ER_BAD 0xffffffff
227
228/* EROM CompIdentA */
229#define CIA_MFG_MASK 0xfff00000
230#define CIA_MFG_SHIFT 20
231#define CIA_CID_MASK 0x000fff00
232#define CIA_CID_SHIFT 8
233#define CIA_CCL_MASK 0x000000f0
234#define CIA_CCL_SHIFT 4
235
236/* EROM CompIdentB */
237#define CIB_REV_MASK 0xff000000
238#define CIB_REV_SHIFT 24
239#define CIB_NSW_MASK 0x00f80000
240#define CIB_NSW_SHIFT 19
241#define CIB_NMW_MASK 0x0007c000
242#define CIB_NMW_SHIFT 14
243#define CIB_NSP_MASK 0x00003e00
244#define CIB_NSP_SHIFT 9
245#define CIB_NMP_MASK 0x000001f0
246#define CIB_NMP_SHIFT 4
247
248/* EROM AddrDesc */
249#define AD_ADDR_MASK 0xfffff000
250#define AD_SP_MASK 0x00000f00
251#define AD_SP_SHIFT 8
252#define AD_ST_MASK 0x000000c0
253#define AD_ST_SHIFT 6
254#define AD_ST_SLAVE 0x00000000
255#define AD_ST_BRIDGE 0x00000040
256#define AD_ST_SWRAP 0x00000080
257#define AD_ST_MWRAP 0x000000c0
258#define AD_SZ_MASK 0x00000030
259#define AD_SZ_SHIFT 4
260#define AD_SZ_4K 0x00000000
261#define AD_SZ_8K 0x00000010
262#define AD_SZ_16K 0x00000020
263#define AD_SZ_SZD 0x00000030
264#define AD_AG32 0x00000008
265#define AD_ADDR_ALIGN 0x00000fff
266#define AD_SZ_BASE 0x00001000 /* 4KB */
267
268/* EROM SizeDesc */
269#define SD_SZ_MASK 0xfffff000
270#define SD_SG32 0x00000008
271#define SD_SZ_ALIGN 0x00000fff
272
273/* PCI config space bit 4 for 4306c0 slow clock source */
274#define PCI_CFG_GPIO_SCS 0x10
275/* PCI config space GPIO 14 for Xtal power-up */
276#define PCI_CFG_GPIO_XTAL 0x40
277/* PCI config space GPIO 15 for PLL power-down */
278#define PCI_CFG_GPIO_PLL 0x80
279
280/* power control defines */
281#define PLL_DELAY 150 /* us pll on delay */
282#define FREF_DELAY 200 /* us fref change delay */
283#define XTAL_ON_DELAY 1000 /* us crystal power-on delay */
284
285/* resetctrl */
286#define AIRC_RESET 1
287
288#define NOREV -1 /* Invalid rev */
289
290/* GPIO Based LED powersave defines */
291#define DEFAULT_GPIO_ONTIME 10 /* Default: 10% on */
 292#define DEFAULT_GPIO_OFFTIME 90 /* Default: 90% off */
293
294/* When Srom support present, fields in sromcontrol */
295#define SRC_START 0x80000000
296#define SRC_BUSY 0x80000000
297#define SRC_OPCODE 0x60000000
298#define SRC_OP_READ 0x00000000
299#define SRC_OP_WRITE 0x20000000
300#define SRC_OP_WRDIS 0x40000000
301#define SRC_OP_WREN 0x60000000
302#define SRC_OTPSEL 0x00000010
303#define SRC_LOCK 0x00000008
304#define SRC_SIZE_MASK 0x00000006
305#define SRC_SIZE_1K 0x00000000
306#define SRC_SIZE_4K 0x00000002
307#define SRC_SIZE_16K 0x00000004
308#define SRC_SIZE_SHIFT 1
309#define SRC_PRESENT 0x00000001
310
311/* External PA enable mask */
312#define GPIO_CTRL_EPA_EN_MASK 0x40
313
314#define DEFAULT_GPIOTIMERVAL \
315 ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME)
316
317#define BADIDX (SI_MAXCORES + 1)
318
319/* Newer chips can access the PCI/PCIE and CC cores without having to change
 320 * the PCI BAR0 window
321 */
322#define SI_FAST(si) (((si)->pub.buscoretype == PCIE_CORE_ID) || \
323 (((si)->pub.buscoretype == PCI_CORE_ID) && \
324 (si)->pub.buscorerev >= 13))
325
326#define CCREGS_FAST(si) (((char __iomem *)((si)->curmap) + \
327 PCI_16KB0_CCREGS_OFFSET))
328
329#define IS_SIM(chippkg) \
330 ((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID))
331
332/*
 333 * Macros to disable/restore function core (D11, ENET, ILINE20, etc.) interrupts
 334 * before and after core switching, to avoid invalid register accesses inside the ISR.
335 */
336#define INTR_OFF(si, intr_val) \
337 if ((si)->intrsoff_fn && \
338 (si)->coreid[(si)->curidx] == (si)->dev_coreid) \
339 intr_val = (*(si)->intrsoff_fn)((si)->intr_arg)
340
341#define INTR_RESTORE(si, intr_val) \
342 if ((si)->intrsrestore_fn && \
343 (si)->coreid[(si)->curidx] == (si)->dev_coreid) \
344 (*(si)->intrsrestore_fn)((si)->intr_arg, intr_val)
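
/*
 * Usage sketch (editorial illustration, not driver code): the two macros
 * are meant to bracket a core switch, exactly as the slow path of
 * ai_corereg() later in this file does:
 *
 *	uint intr_val = 0, origidx;
 *
 *	INTR_OFF(sii, intr_val);
 *	origidx = sii->curidx;
 *	... ai_setcoreidx(&sii->pub, coreidx) and register accesses ...
 *	ai_setcoreidx(&sii->pub, origidx);
 *	INTR_RESTORE(sii, intr_val);
 */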
345
346#define PCI(si) ((si)->pub.buscoretype == PCI_CORE_ID)
347#define PCIE(si) ((si)->pub.buscoretype == PCIE_CORE_ID)
348
349#define PCI_FORCEHT(si) (PCIE(si) && (si->pub.chip == BCM4716_CHIP_ID))
350
351#ifdef BCMDBG
352#define SI_MSG(args) printk args
353#else
354#define SI_MSG(args)
355#endif /* BCMDBG */
356
357#define GOODCOREADDR(x, b) \
358 (((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \
359 IS_ALIGNED((x), SI_CORE_SIZE))
360
361#define PCIEREGS(si) ((__iomem char *)((si)->curmap) + \
362 PCI_16KB0_PCIREGS_OFFSET)
363
364struct aidmp {
365 u32 oobselina30; /* 0x000 */
366 u32 oobselina74; /* 0x004 */
367 u32 PAD[6];
368 u32 oobselinb30; /* 0x020 */
369 u32 oobselinb74; /* 0x024 */
370 u32 PAD[6];
371 u32 oobselinc30; /* 0x040 */
372 u32 oobselinc74; /* 0x044 */
373 u32 PAD[6];
374 u32 oobselind30; /* 0x060 */
375 u32 oobselind74; /* 0x064 */
376 u32 PAD[38];
377 u32 oobselouta30; /* 0x100 */
378 u32 oobselouta74; /* 0x104 */
379 u32 PAD[6];
380 u32 oobseloutb30; /* 0x120 */
381 u32 oobseloutb74; /* 0x124 */
382 u32 PAD[6];
383 u32 oobseloutc30; /* 0x140 */
384 u32 oobseloutc74; /* 0x144 */
385 u32 PAD[6];
386 u32 oobseloutd30; /* 0x160 */
387 u32 oobseloutd74; /* 0x164 */
388 u32 PAD[38];
389 u32 oobsynca; /* 0x200 */
390 u32 oobseloutaen; /* 0x204 */
391 u32 PAD[6];
392 u32 oobsyncb; /* 0x220 */
393 u32 oobseloutben; /* 0x224 */
394 u32 PAD[6];
395 u32 oobsyncc; /* 0x240 */
396 u32 oobseloutcen; /* 0x244 */
397 u32 PAD[6];
398 u32 oobsyncd; /* 0x260 */
399 u32 oobseloutden; /* 0x264 */
400 u32 PAD[38];
401 u32 oobaextwidth; /* 0x300 */
402 u32 oobainwidth; /* 0x304 */
403 u32 oobaoutwidth; /* 0x308 */
404 u32 PAD[5];
405 u32 oobbextwidth; /* 0x320 */
406 u32 oobbinwidth; /* 0x324 */
407 u32 oobboutwidth; /* 0x328 */
408 u32 PAD[5];
409 u32 oobcextwidth; /* 0x340 */
410 u32 oobcinwidth; /* 0x344 */
411 u32 oobcoutwidth; /* 0x348 */
412 u32 PAD[5];
413 u32 oobdextwidth; /* 0x360 */
414 u32 oobdinwidth; /* 0x364 */
415 u32 oobdoutwidth; /* 0x368 */
416 u32 PAD[37];
417 u32 ioctrlset; /* 0x400 */
418 u32 ioctrlclear; /* 0x404 */
419 u32 ioctrl; /* 0x408 */
420 u32 PAD[61];
421 u32 iostatus; /* 0x500 */
422 u32 PAD[127];
423 u32 ioctrlwidth; /* 0x700 */
424 u32 iostatuswidth; /* 0x704 */
425 u32 PAD[62];
426 u32 resetctrl; /* 0x800 */
427 u32 resetstatus; /* 0x804 */
428 u32 resetreadid; /* 0x808 */
429 u32 resetwriteid; /* 0x80c */
430 u32 PAD[60];
431 u32 errlogctrl; /* 0x900 */
432 u32 errlogdone; /* 0x904 */
433 u32 errlogstatus; /* 0x908 */
434 u32 errlogaddrlo; /* 0x90c */
435 u32 errlogaddrhi; /* 0x910 */
436 u32 errlogid; /* 0x914 */
437 u32 errloguser; /* 0x918 */
438 u32 errlogflags; /* 0x91c */
439 u32 PAD[56];
440 u32 intstatus; /* 0xa00 */
441 u32 PAD[127];
442 u32 config; /* 0xe00 */
443 u32 PAD[63];
444 u32 itcr; /* 0xf00 */
445 u32 PAD[3];
446 u32 itipooba; /* 0xf10 */
447 u32 itipoobb; /* 0xf14 */
448 u32 itipoobc; /* 0xf18 */
449 u32 itipoobd; /* 0xf1c */
450 u32 PAD[4];
451 u32 itipoobaout; /* 0xf30 */
452 u32 itipoobbout; /* 0xf34 */
453 u32 itipoobcout; /* 0xf38 */
454 u32 itipoobdout; /* 0xf3c */
455 u32 PAD[4];
456 u32 itopooba; /* 0xf50 */
457 u32 itopoobb; /* 0xf54 */
458 u32 itopoobc; /* 0xf58 */
459 u32 itopoobd; /* 0xf5c */
460 u32 PAD[4];
461 u32 itopoobain; /* 0xf70 */
462 u32 itopoobbin; /* 0xf74 */
463 u32 itopoobcin; /* 0xf78 */
464 u32 itopoobdin; /* 0xf7c */
465 u32 PAD[4];
466 u32 itopreset; /* 0xf90 */
467 u32 PAD[15];
468 u32 peripherialid4; /* 0xfd0 */
469 u32 peripherialid5; /* 0xfd4 */
470 u32 peripherialid6; /* 0xfd8 */
471 u32 peripherialid7; /* 0xfdc */
472 u32 peripherialid0; /* 0xfe0 */
473 u32 peripherialid1; /* 0xfe4 */
474 u32 peripherialid2; /* 0xfe8 */
475 u32 peripherialid3; /* 0xfec */
476 u32 componentid0; /* 0xff0 */
477 u32 componentid1; /* 0xff4 */
478 u32 componentid2; /* 0xff8 */
479 u32 componentid3; /* 0xffc */
480};
481
482/* EROM parsing */
483
484static u32
485get_erom_ent(struct si_pub *sih, u32 __iomem **eromptr, u32 mask, u32 match)
486{
487 u32 ent;
488 uint inv = 0, nom = 0;
489
490 while (true) {
491 ent = R_REG(*eromptr);
492 (*eromptr)++;
493
494 if (mask == 0)
495 break;
496
497 if ((ent & ER_VALID) == 0) {
498 inv++;
499 continue;
500 }
501
502 if (ent == (ER_END | ER_VALID))
503 break;
504
505 if ((ent & mask) == match)
506 break;
507
508 nom++;
509 }
510
511 return ent;
512}
513
514static u32
515get_asd(struct si_pub *sih, u32 __iomem **eromptr, uint sp, uint ad, uint st,
516 u32 *addrl, u32 *addrh, u32 *sizel, u32 *sizeh)
517{
518 u32 asd, sz, szd;
519
520 asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
521 if (((asd & ER_TAG1) != ER_ADD) ||
522 (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
523 ((asd & AD_ST_MASK) != st)) {
524 /* This is not what we want, "push" it back */
525 (*eromptr)--;
526 return 0;
527 }
528 *addrl = asd & AD_ADDR_MASK;
529 if (asd & AD_AG32)
530 *addrh = get_erom_ent(sih, eromptr, 0, 0);
531 else
532 *addrh = 0;
533 *sizeh = 0;
534 sz = asd & AD_SZ_MASK;
535 if (sz == AD_SZ_SZD) {
536 szd = get_erom_ent(sih, eromptr, 0, 0);
537 *sizel = szd & SD_SZ_MASK;
538 if (szd & SD_SG32)
539 *sizeh = get_erom_ent(sih, eromptr, 0, 0);
540 } else
541 *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
542
543 return asd;
544}
545
546static void ai_hwfixup(struct si_info *sii)
547{
548}
549
550/* parse the enumeration rom to identify all cores */
551static void ai_scan(struct si_pub *sih, struct chipcregs __iomem *cc)
552{
553 struct si_info *sii = (struct si_info *)sih;
554
555 u32 erombase;
556 u32 __iomem *eromptr, *eromlim;
557 void __iomem *regs = cc;
558
559 erombase = R_REG(&cc->eromptr);
560
561 /* Set wrappers address */
562 sii->curwrap = (void *)((unsigned long)cc + SI_CORE_SIZE);
563
564 /* Now point the window at the erom */
565 pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, erombase);
566 eromptr = regs;
567 eromlim = eromptr + (ER_REMAPCONTROL / sizeof(u32));
568
569 while (eromptr < eromlim) {
570 u32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
571 u32 mpd, asd, addrl, addrh, sizel, sizeh;
572 u32 __iomem *base;
573 uint i, j, idx;
574 bool br;
575
576 br = false;
577
578 /* Grok a component */
579 cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
580 if (cia == (ER_END | ER_VALID)) {
581 /* Found END of erom */
582 ai_hwfixup(sii);
583 return;
584 }
585 base = eromptr - 1;
586 cib = get_erom_ent(sih, &eromptr, 0, 0);
587
588 if ((cib & ER_TAG) != ER_CI) {
589 /* CIA not followed by CIB */
590 goto error;
591 }
592
593 cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
594 mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
595 crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
596 nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
597 nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
598 nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
599 nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
600
601 if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
602 continue;
603 if ((nmw + nsw == 0)) {
604 /* A component which is not a core */
605 if (cid == OOB_ROUTER_CORE_ID) {
606 asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
607 &addrl, &addrh, &sizel, &sizeh);
608 if (asd != 0)
609 sii->oob_router = addrl;
610 }
611 continue;
612 }
613
614 idx = sii->numcores;
615/* sii->eromptr[idx] = base; */
616 sii->cia[idx] = cia;
617 sii->cib[idx] = cib;
618 sii->coreid[idx] = cid;
619
620 for (i = 0; i < nmp; i++) {
621 mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
622 if ((mpd & ER_TAG) != ER_MP) {
623 /* Not enough MP entries for component */
624 goto error;
625 }
626 }
627
628 /* First Slave Address Descriptor should be port 0:
629 * the main register space for the core
630 */
631 asd =
632 get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh,
633 &sizel, &sizeh);
634 if (asd == 0) {
635 /* Try again to see if it is a bridge */
636 asd =
637 get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl,
638 &addrh, &sizel, &sizeh);
639 if (asd != 0)
640 br = true;
641 else if ((addrh != 0) || (sizeh != 0)
642 || (sizel != SI_CORE_SIZE)) {
643 /* First Slave ASD for core malformed */
644 goto error;
645 }
646 }
647 sii->coresba[idx] = addrl;
648 sii->coresba_size[idx] = sizel;
649 /* Get any more ASDs in port 0 */
650 j = 1;
651 do {
652 asd =
653 get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl,
654 &addrh, &sizel, &sizeh);
655 if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
656 sii->coresba2[idx] = addrl;
657 sii->coresba2_size[idx] = sizel;
658 }
659 j++;
660 } while (asd != 0);
661
662 /* Go through the ASDs for other slave ports */
663 for (i = 1; i < nsp; i++) {
664 j = 0;
665 do {
666 asd =
667 get_asd(sih, &eromptr, i, j++, AD_ST_SLAVE,
668 &addrl, &addrh, &sizel, &sizeh);
669 } while (asd != 0);
670 if (j == 0) {
671 /* SP has no address descriptors */
672 goto error;
673 }
674 }
675
676 /* Now get master wrappers */
677 for (i = 0; i < nmw; i++) {
678 asd =
679 get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl,
680 &addrh, &sizel, &sizeh);
681 if (asd == 0) {
682 /* Missing descriptor for MW */
683 goto error;
684 }
685 if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
686 /* Master wrapper %d is not 4KB */
687 goto error;
688 }
689 if (i == 0)
690 sii->wrapba[idx] = addrl;
691 }
692
693 /* And finally slave wrappers */
694 for (i = 0; i < nsw; i++) {
695 uint fwp = (nsp == 1) ? 0 : 1;
696 asd =
697 get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP,
698 &addrl, &addrh, &sizel, &sizeh);
699 if (asd == 0) {
700 /* Missing descriptor for SW */
701 goto error;
702 }
703 if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
704 /* Slave wrapper is not 4KB */
705 goto error;
706 }
707 if ((nmw == 0) && (i == 0))
708 sii->wrapba[idx] = addrl;
709 }
710
711 /* Don't record bridges */
712 if (br)
713 continue;
714
715 /* Done with core */
716 sii->numcores++;
717 }
718
719 error:
720 /* Reached end of erom without finding END */
721 sii->numcores = 0;
722 return;
723}
724
725/*
726 * This function changes the logical "focus" to the indicated core.
727 * Return the current core's virtual address. Since each core starts with the
728 * same set of registers (BIST, clock control, etc), the returned address
729 * contains the first register of this 'common' register block (not to be
730 * confused with 'common core').
731 */
732void __iomem *ai_setcoreidx(struct si_pub *sih, uint coreidx)
733{
734 struct si_info *sii = (struct si_info *)sih;
735 u32 addr = sii->coresba[coreidx];
736 u32 wrap = sii->wrapba[coreidx];
737
738 if (coreidx >= sii->numcores)
739 return NULL;
740
741 /* point bar0 window */
742 pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, addr);
743 /* point bar0 2nd 4KB window */
744 pci_write_config_dword(sii->pbus, PCI_BAR0_WIN2, wrap);
745 sii->curidx = coreidx;
746
747 return sii->curmap;
748}
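
/*
 * Usage sketch (editorial illustration, not driver code): a typical caller
 * saves the current index, focuses chipcommon, reads through the returned
 * mapping and then restores the previous core, mirroring what
 * ai_is_sprom_available() at the end of this file does:
 *
 *	struct si_info *sii = (struct si_info *)sih;
 *	uint origidx = sii->curidx;
 *	struct chipcregs __iomem *cc = ai_setcoreidx(sih, SI_CC_IDX);
 *	u32 cap = R_REG(&cc->capabilities);
 *	ai_setcoreidx(sih, origidx);
 */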
749
750/* Return the number of address spaces in current core */
751int ai_numaddrspaces(struct si_pub *sih)
752{
753 return 2;
754}
755
756/* Return the address of the nth address space in the current core */
757u32 ai_addrspace(struct si_pub *sih, uint asidx)
758{
759 struct si_info *sii;
760 uint cidx;
761
762 sii = (struct si_info *)sih;
763 cidx = sii->curidx;
764
765 if (asidx == 0)
766 return sii->coresba[cidx];
767 else if (asidx == 1)
768 return sii->coresba2[cidx];
769 else {
770 /* Need to parse the erom again to find addr space */
771 return 0;
772 }
773}
774
775/* Return the size of the nth address space in the current core */
776u32 ai_addrspacesize(struct si_pub *sih, uint asidx)
777{
778 struct si_info *sii;
779 uint cidx;
780
781 sii = (struct si_info *)sih;
782 cidx = sii->curidx;
783
784 if (asidx == 0)
785 return sii->coresba_size[cidx];
786 else if (asidx == 1)
787 return sii->coresba2_size[cidx];
788 else {
789 /* Need to parse the erom again to find addr */
790 return 0;
791 }
792}
793
794uint ai_flag(struct si_pub *sih)
795{
796 struct si_info *sii;
797 struct aidmp *ai;
798
799 sii = (struct si_info *)sih;
800 ai = sii->curwrap;
801
802 return R_REG(&ai->oobselouta30) & 0x1f;
803}
804
805void ai_setint(struct si_pub *sih, int siflag)
806{
807}
808
809uint ai_corevendor(struct si_pub *sih)
810{
811 struct si_info *sii;
812 u32 cia;
813
814 sii = (struct si_info *)sih;
815 cia = sii->cia[sii->curidx];
816 return (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
817}
818
819uint ai_corerev(struct si_pub *sih)
820{
821 struct si_info *sii;
822 u32 cib;
823
824 sii = (struct si_info *)sih;
825 cib = sii->cib[sii->curidx];
826 return (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
827}
828
829bool ai_iscoreup(struct si_pub *sih)
830{
831 struct si_info *sii;
832 struct aidmp *ai;
833
834 sii = (struct si_info *)sih;
835 ai = sii->curwrap;
836
837 return (((R_REG(&ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) ==
838 SICF_CLOCK_EN)
839 && ((R_REG(&ai->resetctrl) & AIRC_RESET) == 0));
840}
841
842void ai_core_cflags_wo(struct si_pub *sih, u32 mask, u32 val)
843{
844 struct si_info *sii;
845 struct aidmp *ai;
846 u32 w;
847
848 sii = (struct si_info *)sih;
849
850 ai = sii->curwrap;
851
852 if (mask || val) {
853 w = ((R_REG(&ai->ioctrl) & ~mask) | val);
854 W_REG(&ai->ioctrl, w);
855 }
856}
857
858u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val)
859{
860 struct si_info *sii;
861 struct aidmp *ai;
862 u32 w;
863
864 sii = (struct si_info *)sih;
865 ai = sii->curwrap;
866
867 if (mask || val) {
868 w = ((R_REG(&ai->ioctrl) & ~mask) | val);
869 W_REG(&ai->ioctrl, w);
870 }
871
872 return R_REG(&ai->ioctrl);
873}
874
875/* return true if PCIE capability exists in the pci config space */
876static bool ai_ispcie(struct si_info *sii)
877{
878 u8 cap_ptr;
879
880 cap_ptr =
881 pcicore_find_pci_capability(sii->pbus, PCI_CAP_ID_EXP, NULL,
882 NULL);
883 if (!cap_ptr)
884 return false;
885
886 return true;
887}
888
889static bool ai_buscore_prep(struct si_info *sii)
890{
891 /* kludge to enable the clock on the 4306 which lacks a slowclock */
892 if (!ai_ispcie(sii))
893 ai_clkctl_xtal(&sii->pub, XTAL | PLL, ON);
894 return true;
895}
896
897u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val)
898{
899 struct si_info *sii;
900 struct aidmp *ai;
901 u32 w;
902
903 sii = (struct si_info *)sih;
904 ai = sii->curwrap;
905
906 if (mask || val) {
907 w = ((R_REG(&ai->iostatus) & ~mask) | val);
908 W_REG(&ai->iostatus, w);
909 }
910
911 return R_REG(&ai->iostatus);
912}
913
914static bool
915ai_buscore_setup(struct si_info *sii, u32 savewin, uint *origidx)
916{
917 bool pci, pcie;
918 uint i;
919 uint pciidx, pcieidx, pcirev, pcierev;
920 struct chipcregs __iomem *cc;
921
922 cc = ai_setcoreidx(&sii->pub, SI_CC_IDX);
923
924 /* get chipcommon rev */
925 sii->pub.ccrev = (int)ai_corerev(&sii->pub);
926
927 /* get chipcommon chipstatus */
928 if (sii->pub.ccrev >= 11)
929 sii->pub.chipst = R_REG(&cc->chipstatus);
930
 931 /* get chipcommon capabilities */
932 sii->pub.cccaps = R_REG(&cc->capabilities);
933 /* get chipcommon extended capabilities */
934
935 if (sii->pub.ccrev >= 35)
936 sii->pub.cccaps_ext = R_REG(&cc->capabilities_ext);
937
938 /* get pmu rev and caps */
939 if (sii->pub.cccaps & CC_CAP_PMU) {
940 sii->pub.pmucaps = R_REG(&cc->pmucapabilities);
941 sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
942 }
943
 944 /* figure out bus/original core idx */
945 sii->pub.buscoretype = NODEV_CORE_ID;
946 sii->pub.buscorerev = NOREV;
947 sii->pub.buscoreidx = BADIDX;
948
949 pci = pcie = false;
950 pcirev = pcierev = NOREV;
951 pciidx = pcieidx = BADIDX;
952
953 for (i = 0; i < sii->numcores; i++) {
954 uint cid, crev;
955
956 ai_setcoreidx(&sii->pub, i);
957 cid = ai_coreid(&sii->pub);
958 crev = ai_corerev(&sii->pub);
959
960 if (cid == PCI_CORE_ID) {
961 pciidx = i;
962 pcirev = crev;
963 pci = true;
964 } else if (cid == PCIE_CORE_ID) {
965 pcieidx = i;
966 pcierev = crev;
967 pcie = true;
968 }
969
970 /* find the core idx before entering this func. */
971 if ((savewin && (savewin == sii->coresba[i])) ||
972 (cc == sii->regs[i]))
973 *origidx = i;
974 }
975
976 if (pci && pcie) {
977 if (ai_ispcie(sii))
978 pci = false;
979 else
980 pcie = false;
981 }
982 if (pci) {
983 sii->pub.buscoretype = PCI_CORE_ID;
984 sii->pub.buscorerev = pcirev;
985 sii->pub.buscoreidx = pciidx;
986 } else if (pcie) {
987 sii->pub.buscoretype = PCIE_CORE_ID;
988 sii->pub.buscorerev = pcierev;
989 sii->pub.buscoreidx = pcieidx;
990 }
991
992 /* fixup necessary chip/core configurations */
993 if (SI_FAST(sii)) {
994 if (!sii->pch) {
995 sii->pch = pcicore_init(&sii->pub, sii->pbus,
996 (__iomem void *)PCIEREGS(sii));
997 if (sii->pch == NULL)
998 return false;
999 }
1000 }
1001 if (ai_pci_fixcfg(&sii->pub)) {
1002 /* si_doattach: si_pci_fixcfg failed */
1003 return false;
1004 }
1005
1006 /* return to the original core */
1007 ai_setcoreidx(&sii->pub, *origidx);
1008
1009 return true;
1010}
1011
1012/*
1013 * get boardtype and boardrev
1014 */
1015static __used void ai_nvram_process(struct si_info *sii)
1016{
1017 uint w = 0;
1018
1019 /* do a pci config read to get subsystem id and subvendor id */
1020 pci_read_config_dword(sii->pbus, PCI_SUBSYSTEM_VENDOR_ID, &w);
1021
1022 sii->pub.boardvendor = w & 0xffff;
1023 sii->pub.boardtype = (w >> 16) & 0xffff;
1024 sii->pub.boardflags = getintvar(&sii->pub, BRCMS_SROM_BOARDFLAGS);
1025}
1026
1027static struct si_info *ai_doattach(struct si_info *sii,
1028 void __iomem *regs, struct pci_dev *pbus)
1029{
1030 struct si_pub *sih = &sii->pub;
1031 u32 w, savewin;
1032 struct chipcregs __iomem *cc;
1033 uint socitype;
1034 uint origidx;
1035
1036 memset((unsigned char *) sii, 0, sizeof(struct si_info));
1037
1038 savewin = 0;
1039
1040 sih->buscoreidx = BADIDX;
1041
1042 sii->curmap = regs;
1043 sii->pbus = pbus;
1044
1045 /* find Chipcommon address */
1046 pci_read_config_dword(sii->pbus, PCI_BAR0_WIN, &savewin);
1047 if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
1048 savewin = SI_ENUM_BASE;
1049
1050 pci_write_config_dword(sii->pbus, PCI_BAR0_WIN,
1051 SI_ENUM_BASE);
1052 cc = (struct chipcregs __iomem *) regs;
1053
1054 /* bus/core/clk setup for register access */
1055 if (!ai_buscore_prep(sii))
1056 return NULL;
1057
1058 /*
1059 * ChipID recognition.
1060 * We assume we can read chipid at offset 0 from the regs arg.
1061 * If we add other chiptypes (or if we need to support old sdio
1062 * hosts w/o chipcommon), some way of recognizing them needs to
1063 * be added here.
1064 */
1065 w = R_REG(&cc->chipid);
1066 socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
 1067 /* Might as well fill in chip id, rev & pkg */
1068 sih->chip = w & CID_ID_MASK;
1069 sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
1070 sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
1071
1072 sih->issim = false;
1073
1074 /* scan for cores */
1075 if (socitype == SOCI_AI) {
1076 SI_MSG(("Found chip type AI (0x%08x)\n", w));
1077 /* pass chipc address instead of original core base */
1078 ai_scan(&sii->pub, cc);
1079 } else {
1080 /* Found chip of unknown type */
1081 return NULL;
1082 }
1083 /* no cores found, bail out */
1084 if (sii->numcores == 0)
1085 return NULL;
1086
1087 /* bus/core/clk setup */
1088 origidx = SI_CC_IDX;
1089 if (!ai_buscore_setup(sii, savewin, &origidx))
1090 goto exit;
1091
1092 /* Init nvram from sprom/otp if they exist */
1093 if (srom_var_init(&sii->pub, cc))
1094 goto exit;
1095
1096 ai_nvram_process(sii);
1097
1098 /* === NVRAM, clock is ready === */
1099 cc = (struct chipcregs __iomem *) ai_setcore(sih, CC_CORE_ID, 0);
1100 W_REG(&cc->gpiopullup, 0);
1101 W_REG(&cc->gpiopulldown, 0);
1102 ai_setcoreidx(sih, origidx);
1103
1104 /* PMU specific initializations */
1105 if (sih->cccaps & CC_CAP_PMU) {
1106 u32 xtalfreq;
1107 si_pmu_init(sih);
1108 si_pmu_chip_init(sih);
1109
1110 xtalfreq = si_pmu_measure_alpclk(sih);
1111 si_pmu_pll_init(sih, xtalfreq);
1112 si_pmu_res_init(sih);
1113 si_pmu_swreg_init(sih);
1114 }
1115
1116 /* setup the GPIO based LED powersave register */
1117 w = getintvar(sih, BRCMS_SROM_LEDDC);
1118 if (w == 0)
1119 w = DEFAULT_GPIOTIMERVAL;
1120 ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, gpiotimerval),
1121 ~0, w);
1122
1123 if (PCIE(sii))
1124 pcicore_attach(sii->pch, SI_DOATTACH);
1125
1126 if (sih->chip == BCM43224_CHIP_ID) {
1127 /*
 1128 * enable 12 mA drive strength for 43224 and
1129 * set chipControl register bit 15
1130 */
1131 if (sih->chiprev == 0) {
1132 SI_MSG(("Applying 43224A0 WARs\n"));
1133 ai_corereg(sih, SI_CC_IDX,
1134 offsetof(struct chipcregs, chipcontrol),
1135 CCTRL43224_GPIO_TOGGLE,
1136 CCTRL43224_GPIO_TOGGLE);
1137 si_pmu_chipcontrol(sih, 0, CCTRL_43224A0_12MA_LED_DRIVE,
1138 CCTRL_43224A0_12MA_LED_DRIVE);
1139 }
1140 if (sih->chiprev >= 1) {
1141 SI_MSG(("Applying 43224B0+ WARs\n"));
1142 si_pmu_chipcontrol(sih, 0, CCTRL_43224B0_12MA_LED_DRIVE,
1143 CCTRL_43224B0_12MA_LED_DRIVE);
1144 }
1145 }
1146
1147 if (sih->chip == BCM4313_CHIP_ID) {
1148 /*
 1149 * enable 12 mA drive strength for 4313 and
1150 * set chipControl register bit 1
1151 */
1152 SI_MSG(("Applying 4313 WARs\n"));
1153 si_pmu_chipcontrol(sih, 0, CCTRL_4313_12MA_LED_DRIVE,
1154 CCTRL_4313_12MA_LED_DRIVE);
1155 }
1156
1157 return sii;
1158
1159 exit:
1160 if (sii->pch)
1161 pcicore_deinit(sii->pch);
1162 sii->pch = NULL;
1163
1164 return NULL;
1165}
1166
1167/*
1168 * Allocate a si handle.
1169 * devid - pci device id (used to determine chip#)
1170 * osh - opaque OS handle
1171 * regs - virtual address of initial core registers
1172 */
1173struct si_pub *
1174ai_attach(void __iomem *regs, struct pci_dev *sdh)
1175{
1176 struct si_info *sii;
1177
1178 /* alloc struct si_info */
1179 sii = kmalloc(sizeof(struct si_info), GFP_ATOMIC);
1180 if (sii == NULL)
1181 return NULL;
1182
1183 if (ai_doattach(sii, regs, sdh) == NULL) {
1184 kfree(sii);
1185 return NULL;
1186 }
1187
1188 return (struct si_pub *) sii;
1189}
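
/*
 * Lifecycle sketch (illustrative only; "regs" and "pdev" stand for whatever
 * BAR0 mapping and PCI device the bus glue already owns): ai_attach() is
 * expected to be paired with ai_detach():
 *
 *	struct si_pub *sih = ai_attach(regs, pdev);
 *	if (sih == NULL)
 *		return -ENODEV;
 *	...
 *	ai_detach(sih);
 */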
1190
1191/* may be called with core in reset */
1192void ai_detach(struct si_pub *sih)
1193{
1194 struct si_info *sii;
1195
1196 struct si_pub *si_local = NULL;
1197 memcpy(&si_local, &sih, sizeof(struct si_pub **));
1198
1199 sii = (struct si_info *)sih;
1200
1201 if (sii == NULL)
1202 return;
1203
1204 if (sii->pch)
1205 pcicore_deinit(sii->pch);
1206 sii->pch = NULL;
1207
1208 srom_free_vars(sih);
1209 kfree(sii);
1210}
1211
1212/* register driver interrupt disabling and restoring callback functions */
1213void
1214ai_register_intr_callback(struct si_pub *sih, void *intrsoff_fn,
1215 void *intrsrestore_fn,
1216 void *intrsenabled_fn, void *intr_arg)
1217{
1218 struct si_info *sii;
1219
1220 sii = (struct si_info *)sih;
1221 sii->intr_arg = intr_arg;
1222 sii->intrsoff_fn = (u32 (*)(void *)) intrsoff_fn;
1223 sii->intrsrestore_fn = (void (*) (void *, u32)) intrsrestore_fn;
1224 sii->intrsenabled_fn = (bool (*)(void *)) intrsenabled_fn;
 1225 /* save the current core id. when this function is called, the current core
 1226 * must be the core which provides the driver functions (il, et, wl, etc.)
1227 */
1228 sii->dev_coreid = sii->coreid[sii->curidx];
1229}
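
/*
 * Registration sketch (illustrative only; my_intrs_off, my_intrs_restore
 * and ctx are made-up names): the callbacks are stored through void
 * pointers and cast above, so they must really have the
 * u32 (*)(void *) and void (*)(void *, u32) signatures:
 *
 *	ai_register_intr_callback(sih, (void *)my_intrs_off,
 *				  (void *)my_intrs_restore, NULL, ctx);
 */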
1230
1231void ai_deregister_intr_callback(struct si_pub *sih)
1232{
1233 struct si_info *sii;
1234
1235 sii = (struct si_info *)sih;
1236 sii->intrsoff_fn = NULL;
1237}
1238
1239uint ai_coreid(struct si_pub *sih)
1240{
1241 struct si_info *sii;
1242
1243 sii = (struct si_info *)sih;
1244 return sii->coreid[sii->curidx];
1245}
1246
1247uint ai_coreidx(struct si_pub *sih)
1248{
1249 struct si_info *sii;
1250
1251 sii = (struct si_info *)sih;
1252 return sii->curidx;
1253}
1254
1255bool ai_backplane64(struct si_pub *sih)
1256{
1257 return (sih->cccaps & CC_CAP_BKPLN64) != 0;
1258}
1259
1260/* return index of coreid or BADIDX if not found */
1261uint ai_findcoreidx(struct si_pub *sih, uint coreid, uint coreunit)
1262{
1263 struct si_info *sii;
1264 uint found;
1265 uint i;
1266
1267 sii = (struct si_info *)sih;
1268
1269 found = 0;
1270
1271 for (i = 0; i < sii->numcores; i++)
1272 if (sii->coreid[i] == coreid) {
1273 if (found == coreunit)
1274 return i;
1275 found++;
1276 }
1277
1278 return BADIDX;
1279}
1280
1281/*
1282 * This function changes logical "focus" to the indicated core;
1283 * must be called with interrupts off.
1284 * Moreover, callers should keep interrupts off during switching
1285 * out of and back to d11 core.
1286 */
1287void __iomem *ai_setcore(struct si_pub *sih, uint coreid, uint coreunit)
1288{
1289 uint idx;
1290
1291 idx = ai_findcoreidx(sih, coreid, coreunit);
1292 if (idx >= SI_MAXCORES)
1293 return NULL;
1294
1295 return ai_setcoreidx(sih, idx);
1296}
1297
1298/* Turn off interrupts as required by ai_setcore, before switching cores */
1299void __iomem *ai_switch_core(struct si_pub *sih, uint coreid, uint *origidx,
1300 uint *intr_val)
1301{
1302 void __iomem *cc;
1303 struct si_info *sii;
1304
1305 sii = (struct si_info *)sih;
1306
1307 if (SI_FAST(sii)) {
1308 /* Overloading the origidx variable to remember the coreid,
1309 * this works because the core ids cannot be confused with
1310 * core indices.
1311 */
1312 *origidx = coreid;
1313 if (coreid == CC_CORE_ID)
1314 return CCREGS_FAST(sii);
1315 else if (coreid == sih->buscoretype)
1316 return PCIEREGS(sii);
1317 }
1318 INTR_OFF(sii, *intr_val);
1319 *origidx = sii->curidx;
1320 cc = ai_setcore(sih, coreid, 0);
1321 return cc;
1322}
1323
1324/* restore coreidx and restore interrupt */
1325void ai_restore_core(struct si_pub *sih, uint coreid, uint intr_val)
1326{
1327 struct si_info *sii;
1328
1329 sii = (struct si_info *)sih;
1330 if (SI_FAST(sii)
1331 && ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype)))
1332 return;
1333
1334 ai_setcoreidx(sih, coreid);
1335 INTR_RESTORE(sii, intr_val);
1336}
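
/*
 * Usage sketch (illustrative only): ai_switch_core() and ai_restore_core()
 * are meant to be used as a matched pair around a burst of register
 * accesses on another core:
 *
 *	uint origidx, intr_val;
 *	void __iomem *regs;
 *
 *	regs = ai_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
 *	... access registers through "regs" ...
 *	ai_restore_core(sih, origidx, intr_val);
 */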
1337
1338void ai_write_wrapperreg(struct si_pub *sih, u32 offset, u32 val)
1339{
1340 struct si_info *sii = (struct si_info *)sih;
1341 u32 *w = (u32 *) sii->curwrap;
1342 W_REG(w + (offset / 4), val);
1343 return;
1344}
1345
1346/*
1347 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set
1348 * operation, switch back to the original core, and return the new value.
1349 *
1350 * When using the silicon backplane, no fiddling with interrupts or core
1351 * switches is needed.
1352 *
1353 * Also, when using pci/pcie, we can optimize away the core switching for pci
1354 * registers and (on newer pci cores) chipcommon registers.
1355 */
1356uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
1357 uint val)
1358{
1359 uint origidx = 0;
1360 u32 __iomem *r = NULL;
1361 uint w;
1362 uint intr_val = 0;
1363 bool fast = false;
1364 struct si_info *sii;
1365
1366 sii = (struct si_info *)sih;
1367
1368 if (coreidx >= SI_MAXCORES)
1369 return 0;
1370
1371 /*
1372 * If pci/pcie, we can get at pci/pcie regs
1373 * and on newer cores to chipc
1374 */
1375 if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
1376 /* Chipc registers are mapped at 12KB */
1377 fast = true;
1378 r = (u32 __iomem *)((__iomem char *)sii->curmap +
1379 PCI_16KB0_CCREGS_OFFSET + regoff);
1380 } else if (sii->pub.buscoreidx == coreidx) {
1381 /*
 1382 * pci registers are either in the last 2KB of
 1383 * an 8KB window or, for pcie and pci rev >= 13, at 8KB
1384 */
1385 fast = true;
1386 if (SI_FAST(sii))
1387 r = (u32 __iomem *)((__iomem char *)sii->curmap +
1388 PCI_16KB0_PCIREGS_OFFSET + regoff);
1389 else
1390 r = (u32 __iomem *)((__iomem char *)sii->curmap +
1391 ((regoff >= SBCONFIGOFF) ?
1392 PCI_BAR0_PCISBR_OFFSET :
1393 PCI_BAR0_PCIREGS_OFFSET) + regoff);
1394 }
1395
1396 if (!fast) {
1397 INTR_OFF(sii, intr_val);
1398
1399 /* save current core index */
1400 origidx = ai_coreidx(&sii->pub);
1401
1402 /* switch core */
1403 r = (u32 __iomem *) ((unsigned char __iomem *)
1404 ai_setcoreidx(&sii->pub, coreidx) + regoff);
1405 }
1406
1407 /* mask and set */
1408 if (mask || val) {
1409 w = (R_REG(r) & ~mask) | val;
1410 W_REG(r, w);
1411 }
1412
1413 /* readback */
1414 w = R_REG(r);
1415
1416 if (!fast) {
1417 /* restore core index */
1418 if (origidx != coreidx)
1419 ai_setcoreidx(&sii->pub, origidx);
1420
1421 INTR_RESTORE(sii, intr_val);
1422 }
1423
1424 return w;
1425}
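
/*
 * Usage sketch (illustrative only): mask == 0 and val == 0 turn this into a
 * plain read, while a non-zero mask does a read-modify-write; the
 * gpiotimerval update in ai_doattach() above is one real caller:
 *
 *	u32 cap = ai_corereg(sih, SI_CC_IDX,
 *			     offsetof(struct chipcregs, capabilities), 0, 0);
 *
 *	ai_corereg(sih, SI_CC_IDX,
 *		   offsetof(struct chipcregs, gpiotimerval),
 *		   ~0, DEFAULT_GPIOTIMERVAL);
 */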
1426
1427void ai_core_disable(struct si_pub *sih, u32 bits)
1428{
1429 struct si_info *sii;
1430 u32 dummy;
1431 struct aidmp *ai;
1432
1433 sii = (struct si_info *)sih;
1434
1435 ai = sii->curwrap;
1436
1437 /* if core is already in reset, just return */
1438 if (R_REG(&ai->resetctrl) & AIRC_RESET)
1439 return;
1440
1441 W_REG(&ai->ioctrl, bits);
1442 dummy = R_REG(&ai->ioctrl);
1443 udelay(10);
1444
1445 W_REG(&ai->resetctrl, AIRC_RESET);
1446 udelay(1);
1447}
1448
1449/* reset and re-enable a core
1450 * inputs:
1451 * bits - core specific bits that are set during and after reset sequence
1452 * resetbits - core specific bits that are set only during reset sequence
1453 */
1454void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits)
1455{
1456 struct si_info *sii;
1457 struct aidmp *ai;
1458 u32 dummy;
1459
1460 sii = (struct si_info *)sih;
1461 ai = sii->curwrap;
1462
1463 /*
1464 * Must do the disable sequence first to work
1465 * for arbitrary current core state.
1466 */
1467 ai_core_disable(sih, (bits | resetbits));
1468
1469 /*
1470 * Now do the initialization sequence.
1471 */
1472 W_REG(&ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
1473 dummy = R_REG(&ai->ioctrl);
1474 W_REG(&ai->resetctrl, 0);
1475 udelay(1);
1476
1477 W_REG(&ai->ioctrl, (bits | SICF_CLOCK_EN));
1478 dummy = R_REG(&ai->ioctrl);
1479 udelay(1);
1480}
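
/*
 * Bring-up sketch (illustrative only): a caller that simply wants the
 * currently selected core out of reset with its clock enabled, and has no
 * core-specific ioctrl bits to assert, can do:
 *
 *	if (!ai_iscoreup(sih))
 *		ai_core_reset(sih, 0, 0);
 */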
1481
1482/* return the slow clock source - LPO, XTAL, or PCI */
1483static uint ai_slowclk_src(struct si_info *sii)
1484{
1485 struct chipcregs __iomem *cc;
1486 u32 val;
1487
1488 if (sii->pub.ccrev < 6) {
1489 pci_read_config_dword(sii->pbus, PCI_GPIO_OUT,
1490 &val);
1491 if (val & PCI_CFG_GPIO_SCS)
1492 return SCC_SS_PCI;
1493 return SCC_SS_XTAL;
1494 } else if (sii->pub.ccrev < 10) {
1495 cc = (struct chipcregs __iomem *)
1496 ai_setcoreidx(&sii->pub, sii->curidx);
1497 return R_REG(&cc->slow_clk_ctl) & SCC_SS_MASK;
1498 } else /* Insta-clock */
1499 return SCC_SS_XTAL;
1500}
1501
1502/*
1503* return the ILP (slowclock) min or max frequency
1504* precondition: we've established the chip has dynamic clk control
1505*/
1506static uint ai_slowclk_freq(struct si_info *sii, bool max_freq,
1507 struct chipcregs __iomem *cc)
1508{
1509 u32 slowclk;
1510 uint div;
1511
1512 slowclk = ai_slowclk_src(sii);
1513 if (sii->pub.ccrev < 6) {
1514 if (slowclk == SCC_SS_PCI)
1515 return max_freq ? (PCIMAXFREQ / 64)
1516 : (PCIMINFREQ / 64);
1517 else
1518 return max_freq ? (XTALMAXFREQ / 32)
1519 : (XTALMINFREQ / 32);
1520 } else if (sii->pub.ccrev < 10) {
1521 div = 4 *
1522 (((R_REG(&cc->slow_clk_ctl) & SCC_CD_MASK) >>
1523 SCC_CD_SHIFT) + 1);
1524 if (slowclk == SCC_SS_LPO)
1525 return max_freq ? LPOMAXFREQ : LPOMINFREQ;
1526 else if (slowclk == SCC_SS_XTAL)
1527 return max_freq ? (XTALMAXFREQ / div)
1528 : (XTALMINFREQ / div);
1529 else if (slowclk == SCC_SS_PCI)
1530 return max_freq ? (PCIMAXFREQ / div)
1531 : (PCIMINFREQ / div);
1532 } else {
1533 /* Chipc rev 10 is InstaClock */
1534 div = R_REG(&cc->system_clk_ctl) >> SYCC_CD_SHIFT;
1535 div = 4 * (div + 1);
1536 return max_freq ? XTALMAXFREQ : (XTALMINFREQ / div);
1537 }
1538 return 0;
1539}
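
/*
 * Worked example (numbers are illustrative): for chipcommon rev 6..9 the
 * divider is 4 * (clock-divider field + 1), so a slow_clk_ctl CD field of 1
 * gives div = 4 * (1 + 1) = 8; with a crystal source the reported value is
 * then XTALMAXFREQ / 8 (maximum) or XTALMINFREQ / 8 (minimum).
 */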
1540
1541static void
1542ai_clkctl_setdelay(struct si_info *sii, struct chipcregs __iomem *cc)
1543{
1544 uint slowmaxfreq, pll_delay, slowclk;
1545 uint pll_on_delay, fref_sel_delay;
1546
1547 pll_delay = PLL_DELAY;
1548
1549 /*
1550 * If the slow clock is not sourced by the xtal then
1551 * add the xtal_on_delay since the xtal will also be
1552 * powered down by dynamic clk control logic.
1553 */
1554
1555 slowclk = ai_slowclk_src(sii);
1556 if (slowclk != SCC_SS_XTAL)
1557 pll_delay += XTAL_ON_DELAY;
1558
1559 /* Starting with 4318 it is ILP that is used for the delays */
1560 slowmaxfreq =
1561 ai_slowclk_freq(sii, (sii->pub.ccrev >= 10) ? false : true, cc);
1562
1563 pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
1564 fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;
1565
1566 W_REG(&cc->pll_on_delay, pll_on_delay);
1567 W_REG(&cc->fref_sel_delay, fref_sel_delay);
1568}
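
/*
 * Worked example (illustrative numbers): with a 32768 Hz slow clock and
 * PLL_DELAY of 150 us, pll_on_delay = (32768 * 150 + 999999) / 1000000 = 5,
 * i.e. the microsecond delay rounded up to whole slow-clock ticks.
 */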
1569
1570/* initialize power control delay registers */
1571void ai_clkctl_init(struct si_pub *sih)
1572{
1573 struct si_info *sii;
1574 uint origidx = 0;
1575 struct chipcregs __iomem *cc;
1576 bool fast;
1577
1578 if (!(sih->cccaps & CC_CAP_PWR_CTL))
1579 return;
1580
1581 sii = (struct si_info *)sih;
1582 fast = SI_FAST(sii);
1583 if (!fast) {
1584 origidx = sii->curidx;
1585 cc = (struct chipcregs __iomem *)
1586 ai_setcore(sih, CC_CORE_ID, 0);
1587 if (cc == NULL)
1588 return;
1589 } else {
1590 cc = (struct chipcregs __iomem *) CCREGS_FAST(sii);
1591 if (cc == NULL)
1592 return;
1593 }
1594
1595 /* set all Instaclk chip ILP to 1 MHz */
1596 if (sih->ccrev >= 10)
1597 SET_REG(&cc->system_clk_ctl, SYCC_CD_MASK,
1598 (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
1599
1600 ai_clkctl_setdelay(sii, cc);
1601
1602 if (!fast)
1603 ai_setcoreidx(sih, origidx);
1604}
1605
1606/*
1607 * return the value suitable for writing to the
1608 * dot11 core FAST_PWRUP_DELAY register
1609 */
1610u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih)
1611{
1612 struct si_info *sii;
1613 uint origidx = 0;
1614 struct chipcregs __iomem *cc;
1615 uint slowminfreq;
1616 u16 fpdelay;
1617 uint intr_val = 0;
1618 bool fast;
1619
1620 sii = (struct si_info *)sih;
1621 if (sih->cccaps & CC_CAP_PMU) {
1622 INTR_OFF(sii, intr_val);
1623 fpdelay = si_pmu_fast_pwrup_delay(sih);
1624 INTR_RESTORE(sii, intr_val);
1625 return fpdelay;
1626 }
1627
1628 if (!(sih->cccaps & CC_CAP_PWR_CTL))
1629 return 0;
1630
1631 fast = SI_FAST(sii);
1632 fpdelay = 0;
1633 if (!fast) {
1634 origidx = sii->curidx;
1635 INTR_OFF(sii, intr_val);
1636 cc = (struct chipcregs __iomem *)
1637 ai_setcore(sih, CC_CORE_ID, 0);
1638 if (cc == NULL)
1639 goto done;
1640 } else {
1641 cc = (struct chipcregs __iomem *) CCREGS_FAST(sii);
1642 if (cc == NULL)
1643 goto done;
1644 }
1645
1646 slowminfreq = ai_slowclk_freq(sii, false, cc);
1647 fpdelay = (((R_REG(&cc->pll_on_delay) + 2) * 1000000) +
1648 (slowminfreq - 1)) / slowminfreq;
1649
1650 done:
1651 if (!fast) {
1652 ai_setcoreidx(sih, origidx);
1653 INTR_RESTORE(sii, intr_val);
1654 }
1655 return fpdelay;
1656}
1657
1658/* turn primary xtal and/or pll off/on */
1659int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
1660{
1661 struct si_info *sii;
1662 u32 in, out, outen;
1663
1664 sii = (struct si_info *)sih;
1665
1666 /* pcie core doesn't have any mapping to control the xtal pu */
1667 if (PCIE(sii))
1668 return -1;
1669
1670 pci_read_config_dword(sii->pbus, PCI_GPIO_IN, &in);
1671 pci_read_config_dword(sii->pbus, PCI_GPIO_OUT, &out);
1672 pci_read_config_dword(sii->pbus, PCI_GPIO_OUTEN, &outen);
1673
1674 /*
1675 * Avoid glitching the clock if GPRS is already using it.
1676 * We can't actually read the state of the PLLPD so we infer it
1677 * by the value of XTAL_PU which *is* readable via gpioin.
1678 */
1679 if (on && (in & PCI_CFG_GPIO_XTAL))
1680 return 0;
1681
1682 if (what & XTAL)
1683 outen |= PCI_CFG_GPIO_XTAL;
1684 if (what & PLL)
1685 outen |= PCI_CFG_GPIO_PLL;
1686
1687 if (on) {
1688 /* turn primary xtal on */
1689 if (what & XTAL) {
1690 out |= PCI_CFG_GPIO_XTAL;
1691 if (what & PLL)
1692 out |= PCI_CFG_GPIO_PLL;
1693 pci_write_config_dword(sii->pbus,
1694 PCI_GPIO_OUT, out);
1695 pci_write_config_dword(sii->pbus,
1696 PCI_GPIO_OUTEN, outen);
1697 udelay(XTAL_ON_DELAY);
1698 }
1699
1700 /* turn pll on */
1701 if (what & PLL) {
1702 out &= ~PCI_CFG_GPIO_PLL;
1703 pci_write_config_dword(sii->pbus,
1704 PCI_GPIO_OUT, out);
1705 mdelay(2);
1706 }
1707 } else {
1708 if (what & XTAL)
1709 out &= ~PCI_CFG_GPIO_XTAL;
1710 if (what & PLL)
1711 out |= PCI_CFG_GPIO_PLL;
1712 pci_write_config_dword(sii->pbus,
1713 PCI_GPIO_OUT, out);
1714 pci_write_config_dword(sii->pbus,
1715 PCI_GPIO_OUTEN, outen);
1716 }
1717
1718 return 0;
1719}
1720
1721/* clk control mechanism through chipcommon, no policy checking */
1722static bool _ai_clkctl_cc(struct si_info *sii, uint mode)
1723{
1724 uint origidx = 0;
1725 struct chipcregs __iomem *cc;
1726 u32 scc;
1727 uint intr_val = 0;
1728 bool fast = SI_FAST(sii);
1729
1730 /* chipcommon cores prior to rev6 don't support dynamic clock control */
1731 if (sii->pub.ccrev < 6)
1732 return false;
1733
1734 if (!fast) {
1735 INTR_OFF(sii, intr_val);
1736 origidx = sii->curidx;
1737 cc = (struct chipcregs __iomem *)
1738 ai_setcore(&sii->pub, CC_CORE_ID, 0);
1739 } else {
1740 cc = (struct chipcregs __iomem *) CCREGS_FAST(sii);
1741 if (cc == NULL)
1742 goto done;
1743 }
1744
1745 if (!(sii->pub.cccaps & CC_CAP_PWR_CTL) && (sii->pub.ccrev < 20))
1746 goto done;
1747
1748 switch (mode) {
1749 case CLK_FAST: /* FORCEHT, fast (pll) clock */
1750 if (sii->pub.ccrev < 10) {
1751 /*
1752 * don't forget to force xtal back
1753 * on before we clear SCC_DYN_XTAL..
1754 */
1755 ai_clkctl_xtal(&sii->pub, XTAL, ON);
1756 SET_REG(&cc->slow_clk_ctl,
1757 (SCC_XC | SCC_FS | SCC_IP), SCC_IP);
1758 } else if (sii->pub.ccrev < 20) {
1759 OR_REG(&cc->system_clk_ctl, SYCC_HR);
1760 } else {
1761 OR_REG(&cc->clk_ctl_st, CCS_FORCEHT);
1762 }
1763
1764 /* wait for the PLL */
1765 if (sii->pub.cccaps & CC_CAP_PMU) {
1766 u32 htavail = CCS_HTAVAIL;
1767 SPINWAIT(((R_REG(&cc->clk_ctl_st) & htavail)
1768 == 0), PMU_MAX_TRANSITION_DLY);
1769 } else {
1770 udelay(PLL_DELAY);
1771 }
1772 break;
1773
1774 case CLK_DYNAMIC: /* enable dynamic clock control */
1775 if (sii->pub.ccrev < 10) {
1776 scc = R_REG(&cc->slow_clk_ctl);
1777 scc &= ~(SCC_FS | SCC_IP | SCC_XC);
1778 if ((scc & SCC_SS_MASK) != SCC_SS_XTAL)
1779 scc |= SCC_XC;
1780 W_REG(&cc->slow_clk_ctl, scc);
1781
1782 /*
1783 * for dynamic control, we have to
1784 * release our xtal_pu "force on"
1785 */
1786 if (scc & SCC_XC)
1787 ai_clkctl_xtal(&sii->pub, XTAL, OFF);
1788 } else if (sii->pub.ccrev < 20) {
1789 /* Instaclock */
1790 AND_REG(&cc->system_clk_ctl, ~SYCC_HR);
1791 } else {
1792 AND_REG(&cc->clk_ctl_st, ~CCS_FORCEHT);
1793 }
1794 break;
1795
1796 default:
1797 break;
1798 }
1799
1800 done:
1801 if (!fast) {
1802 ai_setcoreidx(&sii->pub, origidx);
1803 INTR_RESTORE(sii, intr_val);
1804 }
1805 return mode == CLK_FAST;
1806}
1807
1808/*
 1809 * clock control policy function through chipcommon
1810 *
1811 * set dynamic clk control mode (forceslow, forcefast, dynamic)
1812 * returns true if we are forcing fast clock
1813 * this is a wrapper over the next internal function
1814 * to allow flexible policy settings for outside caller
1815 */
1816bool ai_clkctl_cc(struct si_pub *sih, uint mode)
1817{
1818 struct si_info *sii;
1819
1820 sii = (struct si_info *)sih;
1821
1822 /* chipcommon cores prior to rev6 don't support dynamic clock control */
1823 if (sih->ccrev < 6)
1824 return false;
1825
1826 if (PCI_FORCEHT(sii))
1827 return mode == CLK_FAST;
1828
1829 return _ai_clkctl_cc(sii, mode);
1830}
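
/*
 * Usage sketch (illustrative only): a caller that needs the backplane on
 * the fast (HT) clock for a burst of work forces it up front and drops
 * back to dynamic clocking afterwards:
 *
 *	ai_clkctl_cc(sih, CLK_FAST);
 *	... time-critical register work ...
 *	ai_clkctl_cc(sih, CLK_DYNAMIC);
 */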
1831
1832/* Build device path */
1833int ai_devpath(struct si_pub *sih, char *path, int size)
1834{
1835 int slen;
1836
1837 if (!path || size <= 0)
1838 return -1;
1839
1840 slen = snprintf(path, (size_t) size, "pci/%u/%u/",
1841 ((struct si_info *)sih)->pbus->bus->number,
1842 PCI_SLOT(((struct pci_dev *)
1843 (((struct si_info *)(sih))->pbus))->devfn));
1844
1845 if (slen < 0 || slen >= size) {
1846 path[0] = '\0';
1847 return -1;
1848 }
1849
1850 return 0;
1851}
1852
1853void ai_pci_up(struct si_pub *sih)
1854{
1855 struct si_info *sii;
1856
1857 sii = (struct si_info *)sih;
1858
1859 if (PCI_FORCEHT(sii))
1860 _ai_clkctl_cc(sii, CLK_FAST);
1861
1862 if (PCIE(sii))
1863 pcicore_up(sii->pch, SI_PCIUP);
1864
1865}
1866
1867/* Unconfigure and/or apply various WARs when the system is going to sleep */
1868void ai_pci_sleep(struct si_pub *sih)
1869{
1870 struct si_info *sii;
1871
1872 sii = (struct si_info *)sih;
1873
1874 pcicore_sleep(sii->pch);
1875}
1876
1877/* Unconfigure and/or apply various WARs when going down */
1878void ai_pci_down(struct si_pub *sih)
1879{
1880 struct si_info *sii;
1881
1882 sii = (struct si_info *)sih;
1883
1884 /* release FORCEHT since chip is going to "down" state */
1885 if (PCI_FORCEHT(sii))
1886 _ai_clkctl_cc(sii, CLK_DYNAMIC);
1887
1888 pcicore_down(sii->pch, SI_PCIDOWN);
1889}
1890
1891/*
1892 * Configure the pci core for pci client (NIC) action
1893 * coremask is the bitvec of cores by index to be enabled.
1894 */
1895void ai_pci_setup(struct si_pub *sih, uint coremask)
1896{
1897 struct si_info *sii;
1898 struct sbpciregs __iomem *regs = NULL;
1899 u32 siflag = 0, w;
1900 uint idx = 0;
1901
1902 sii = (struct si_info *)sih;
1903
1904 if (PCI(sii)) {
1905 /* get current core index */
1906 idx = sii->curidx;
1907
1908 /* we interrupt on this backplane flag number */
1909 siflag = ai_flag(sih);
1910
1911 /* switch over to pci core */
1912 regs = ai_setcoreidx(sih, sii->pub.buscoreidx);
1913 }
1914
1915 /*
1916 * Enable sb->pci interrupts. Assume
1917 * PCI rev 2.3 support was added in pci core rev 6 and things changed..
1918 */
1919 if (PCIE(sii) || (PCI(sii) && ((sii->pub.buscorerev) >= 6))) {
1920 /* pci config write to set this core bit in PCIIntMask */
1921 pci_read_config_dword(sii->pbus, PCI_INT_MASK, &w);
1922 w |= (coremask << PCI_SBIM_SHIFT);
1923 pci_write_config_dword(sii->pbus, PCI_INT_MASK, w);
1924 } else {
1925 /* set sbintvec bit for our flag number */
1926 ai_setint(sih, siflag);
1927 }
1928
1929 if (PCI(sii)) {
1930 pcicore_pci_setup(sii->pch, regs);
1931
1932 /* switch back to previous core */
1933 ai_setcoreidx(sih, idx);
1934 }
1935}
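
/*
 * Call sketch (illustrative only): the mask is built from core *indices*,
 * so enabling backplane interrupts for the currently selected core would
 * look like:
 *
 *	ai_pci_setup(sih, 1 << ai_coreidx(sih));
 */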
1936
1937/*
1938 * Fixup SROMless PCI device's configuration.
1939 * The current core may be changed upon return.
1940 */
1941int ai_pci_fixcfg(struct si_pub *sih)
1942{
1943 uint origidx;
1944 void __iomem *regs = NULL;
1945 struct si_info *sii = (struct si_info *)sih;
1946
1947 /* Fixup PI in SROM shadow area to enable the correct PCI core access */
1948 /* save the current index */
1949 origidx = ai_coreidx(&sii->pub);
1950
1951 /* check 'pi' is correct and fix it if not */
1952 regs = ai_setcore(&sii->pub, sii->pub.buscoretype, 0);
1953 if (sii->pub.buscoretype == PCIE_CORE_ID)
1954 pcicore_fixcfg_pcie(sii->pch,
1955 (struct sbpcieregs __iomem *)regs);
1956 else if (sii->pub.buscoretype == PCI_CORE_ID)
1957 pcicore_fixcfg_pci(sii->pch, (struct sbpciregs __iomem *)regs);
1958
1959 /* restore the original index */
1960 ai_setcoreidx(&sii->pub, origidx);
1961
1962 pcicore_hwup(sii->pch);
1963 return 0;
1964}
1965
1966/* mask&set gpiocontrol bits */
1967u32 ai_gpiocontrol(struct si_pub *sih, u32 mask, u32 val, u8 priority)
1968{
1969 uint regoff;
1970
1971 regoff = offsetof(struct chipcregs, gpiocontrol);
1972 return ai_corereg(sih, SI_CC_IDX, regoff, mask, val);
1973}
1974
1975void ai_chipcontrl_epa4331(struct si_pub *sih, bool on)
1976{
1977 struct si_info *sii;
1978 struct chipcregs __iomem *cc;
1979 uint origidx;
1980 u32 val;
1981
1982 sii = (struct si_info *)sih;
1983 origidx = ai_coreidx(sih);
1984
1985 cc = (struct chipcregs __iomem *) ai_setcore(sih, CC_CORE_ID, 0);
1986
1987 val = R_REG(&cc->chipcontrol);
1988
1989 if (on) {
1990 if (sih->chippkg == 9 || sih->chippkg == 0xb)
1991 /* Ext PA Controls for 4331 12x9 Package */
1992 W_REG(&cc->chipcontrol, val |
1993 CCTRL4331_EXTPA_EN |
1994 CCTRL4331_EXTPA_ON_GPIO2_5);
1995 else
1996 /* Ext PA Controls for 4331 12x12 Package */
1997 W_REG(&cc->chipcontrol,
1998 val | CCTRL4331_EXTPA_EN);
1999 } else {
2000 val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
2001 W_REG(&cc->chipcontrol, val);
2002 }
2003
2004 ai_setcoreidx(sih, origidx);
2005}
2006
2007/* Enable BT-COEX & Ex-PA for 4313 */
2008void ai_epa_4313war(struct si_pub *sih)
2009{
2010 struct si_info *sii;
2011 struct chipcregs __iomem *cc;
2012 uint origidx;
2013
2014 sii = (struct si_info *)sih;
2015 origidx = ai_coreidx(sih);
2016
2017 cc = ai_setcore(sih, CC_CORE_ID, 0);
2018
2019 /* EPA Fix */
2020 W_REG(&cc->gpiocontrol,
2021 R_REG(&cc->gpiocontrol) | GPIO_CTRL_EPA_EN_MASK);
2022
2023 ai_setcoreidx(sih, origidx);
2024}
2025
2026/* check if the device is removed */
2027bool ai_deviceremoved(struct si_pub *sih)
2028{
2029 u32 w;
2030 struct si_info *sii;
2031
2032 sii = (struct si_info *)sih;
2033
2034 pci_read_config_dword(sii->pbus, PCI_VENDOR_ID, &w);
2035 if ((w & 0xFFFF) != PCI_VENDOR_ID_BROADCOM)
2036 return true;
2037
2038 return false;
2039}
2040
2041bool ai_is_sprom_available(struct si_pub *sih)
2042{
2043 if (sih->ccrev >= 31) {
2044 struct si_info *sii;
2045 uint origidx;
2046 struct chipcregs __iomem *cc;
2047 u32 sromctrl;
2048
2049 if ((sih->cccaps & CC_CAP_SROM) == 0)
2050 return false;
2051
2052 sii = (struct si_info *)sih;
2053 origidx = sii->curidx;
2054 cc = ai_setcoreidx(sih, SI_CC_IDX);
2055 sromctrl = R_REG(&cc->sromcontrol);
2056 ai_setcoreidx(sih, origidx);
2057 return sromctrl & SRC_PRESENT;
2058 }
2059
2060 switch (sih->chip) {
2061 case BCM4313_CHIP_ID:
2062 return (sih->chipst & CST4313_SPROM_PRESENT) != 0;
2063 default:
2064 return true;
2065 }
2066}
2067
2068bool ai_is_otp_disabled(struct si_pub *sih)
2069{
2070 switch (sih->chip) {
2071 case BCM4313_CHIP_ID:
2072 return (sih->chipst & CST4313_OTP_PRESENT) == 0;
2073 /* These chips always have their OTP on */
2074 case BCM43224_CHIP_ID:
2075 case BCM43225_CHIP_ID:
2076 default:
2077 return false;
2078 }
2079}