Merge tag 'intc-part2-for-v3.18' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap into next/drivers
Merge "part 2 of omap intc changes" from Tony Lindgren:
Second part of omap intc interrupt controller changes to
move it to drivers.
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
* tag 'intc-part2-for-v3.18' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap:
irqchip: omap-intc: remove unnecessary comments
irqchip: omap-intc: correct maximum number of MIR registers
irqchip: omap-intc: enable TURBO idle mode
irqchip: omap-intc: enable IP protection
irqchip: omap-intc: remove unnecessary of_address_to_resource() call
irqchip: omap-intc: comment style cleanup
irqchip: omap-intc: minor improvement to omap_irq_pending()
arm: omap: irq: move irq.c to drivers/irqchip/
irqchip: add irq-omap-intc.h header
arm: omap2: n8x0: move i2c devices to DT
diff --git a/Documentation/devicetree/bindings/arm/atmel-at91.txt b/Documentation/devicetree/bindings/arm/atmel-at91.txt
index 16f60b4..6e3e3e5 100644
--- a/Documentation/devicetree/bindings/arm/atmel-at91.txt
+++ b/Documentation/devicetree/bindings/arm/atmel-at91.txt
@@ -61,8 +61,8 @@
- compatible: Should be "atmel,at91rm9200-sdramc",
"atmel,at91sam9260-sdramc",
"atmel,at91sam9g45-ddramc",
+ "atmel,sama5d3-ddramc",
- reg: Should contain registers location and length
- For at91sam9263 and at91sam9g45 you must specify 2 entries.
Examples:
@@ -71,12 +71,6 @@
reg = <0xffffe800 0x200>;
};
- ramc0: ramc@ffffe400 {
- compatible = "atmel,at91sam9g45-ddramc";
- reg = <0xffffe400 0x200
- 0xffffe600 0x200>;
- };
-
SHDWC Shutdown Controller
required properties:
diff --git a/Documentation/devicetree/bindings/mailbox/omap-mailbox.txt b/Documentation/devicetree/bindings/mailbox/omap-mailbox.txt
new file mode 100644
index 0000000..48edc4b
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/omap-mailbox.txt
@@ -0,0 +1,108 @@
+OMAP2+ Mailbox Driver
+=====================
+
+The OMAP mailbox hardware facilitates communication between different processors
+using a queued mailbox interrupt mechanism. The IP block is external to the
+various processor subsystems and is connected on an interconnect bus. The
+communication is achieved through a set of message storage registers and
+interrupt configuration registers.
+
+Each mailbox IP block has a certain number of h/w fifo queues and output
+interrupt lines. An output interrupt line is routed to an interrupt controller
+within a processor subsystem, and there can be more than one line going to a
+specific processor's interrupt controller. The interrupt line connections are
+fixed for an instance and are dictated by the IP integration into the SoC
+(excluding the SoCs that have an Interrupt Crossbar IP). Each interrupt line is
+programmable through a set of interrupt configuration registers, and has an rx
+and a tx interrupt source per h/w fifo. Communication between different
+processors is achieved by programming the rx and tx interrupt sources on the
+appropriate interrupt lines.
+
+The number of h/w fifo queues and interrupt lines dictates the usable registers.
+All the current OMAP SoCs except for the newest DRA7xx SoC have a single IP
+instance. DRA7xx has multiple instances, with different numbers of h/w fifo
+queues and interrupt lines between the instances. The interrupt lines can also
+be routed to different processor sub-systems on DRA7xx, as they are routed
+through the Crossbar, a kind of interrupt router/multiplexer.
+
+Mailbox Device Node:
+====================
+A Mailbox device node is used to represent a Mailbox IP instance within a SoC.
+The sub-mailboxes are represented as child nodes of this parent node.
+
+Required properties:
+--------------------
+- compatible: Should be one of the following,
+ "ti,omap2-mailbox" for OMAP2420, OMAP2430 SoCs
+ "ti,omap3-mailbox" for OMAP3430, OMAP3630 SoCs
+ "ti,omap4-mailbox" for OMAP44xx, OMAP54xx, AM33xx,
+ AM43xx and DRA7xx SoCs
+- reg: Contains the mailbox register address range (base
+ address and length)
+- interrupts: Contains the interrupt information for the mailbox
+ device. The format is dependent on which interrupt
+ controller the OMAP device uses
+- ti,hwmods: Name of the hwmod associated with the mailbox
+- ti,mbox-num-users: Number of targets (processor devices) that the mailbox
+ device can interrupt
+- ti,mbox-num-fifos: Number of h/w fifo queues within the mailbox IP block
+
+Child Nodes:
+============
+A child node represents an actual sub-mailbox device that is used for
+communication between the host processor and a remote processor. Each child
+node should have a unique node name across all the different mailbox device
+nodes.
+
+Required properties:
+--------------------
+- ti,mbox-tx: sub-mailbox descriptor property defining a Tx fifo
+- ti,mbox-rx: sub-mailbox descriptor property defining a Rx fifo
+
+Sub-mailbox Descriptor Data
+---------------------------
+Each of the above ti,mbox-tx and ti,mbox-rx properties should have 3 cells of
+data that represent the following:
+ Cell #1 (fifo_id) - mailbox fifo id used either for transmitting
+ (ti,mbox-tx) or for receiving (ti,mbox-rx)
+ Cell #2 (irq_id) - irq identifier index number to use from the parent's
+ interrupts data. Should be 0 in most cases; a positive
+ index value is seen only on mailboxes that have
+ multiple interrupt lines connected to the MPU processor.
+ Cell #3 (usr_id) - mailbox user id for identifying the interrupt line
+ associated with generating a tx/rx fifo interrupt.
+
+Example:
+--------
+
+/* OMAP4 */
+mailbox: mailbox@4a0f4000 {
+ compatible = "ti,omap4-mailbox";
+ reg = <0x4a0f4000 0x200>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "mailbox";
+ ti,mbox-num-users = <3>;
+ ti,mbox-num-fifos = <8>;
+ mbox_ipu: mbox_ipu {
+ ti,mbox-tx = <0 0 0>;
+ ti,mbox-rx = <1 0 0>;
+ };
+ mbox_dsp: mbox_dsp {
+ ti,mbox-tx = <3 0 0>;
+ ti,mbox-rx = <2 0 0>;
+ };
+};
+
+/* AM33xx */
+mailbox: mailbox@480C8000 {
+ compatible = "ti,omap4-mailbox";
+ reg = <0x480C8000 0x200>;
+ interrupts = <77>;
+ ti,hwmods = "mailbox";
+ ti,mbox-num-users = <4>;
+ ti,mbox-num-fifos = <8>;
+ mbox_wkupm3: wkup_m3 {
+ ti,mbox-tx = <0 0 0>;
+ ti,mbox-rx = <0 0 3>;
+ };
+};
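[Editor's note, not part of the patch: as a purely illustrative aside, the counts and the 3-cell sub-mailbox descriptors documented above could be read with the generic OF helpers along these lines. The function and variable names are hypothetical; the real omap-mailbox driver is not claimed to parse the properties this way.]

#include <linux/of.h>

/* Hypothetical helper: read the mailbox counts and one sub-mailbox node. */
static int example_parse_mailbox(struct device_node *node,
                                 struct device_node *child)
{
        u32 num_users, num_fifos, tx[3], rx[3];
        int ret;

        ret = of_property_read_u32(node, "ti,mbox-num-users", &num_users);
        if (ret)
                return ret;

        ret = of_property_read_u32(node, "ti,mbox-num-fifos", &num_fifos);
        if (ret)
                return ret;

        /* Each descriptor is <fifo_id irq_id usr_id>. */
        ret = of_property_read_u32_array(child, "ti,mbox-tx", tx, 3);
        if (ret)
                return ret;

        return of_property_read_u32_array(child, "ti,mbox-rx", rx, 3);
}

For the OMAP4 mbox_dsp node above, this sketch would read tx = {3, 0, 0} and rx = {2, 0, 0}.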
diff --git a/Documentation/devicetree/bindings/rtc/sun6i-rtc.txt b/Documentation/devicetree/bindings/rtc/sun6i-rtc.txt
new file mode 100644
index 0000000..f007e42
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/sun6i-rtc.txt
@@ -0,0 +1,17 @@
+* sun6i Real Time Clock
+
+RTC controller for the Allwinner A31
+
+Required properties:
+- compatible : Should be "allwinner,sun6i-a31-rtc"
+- reg : physical base address of the controller and length of
+ memory mapped region.
+- interrupts : IRQ lines for the RTC alarm 0 and alarm 1, in that order.
+
+Example:
+
+rtc: rtc@01f00000 {
+ compatible = "allwinner,sun6i-a31-rtc";
+ reg = <0x01f00000 0x54>;
+ interrupts = <0 40 4>, <0 41 4>;
+};
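[Editor's note, not part of the patch: a minimal sketch of how a platform driver could fetch the two alarm interrupts listed above by index; whether the actual sun6i RTC driver uses both lines is not implied here.]

#include <linux/platform_device.h>

/* Hypothetical probe fragment: fetch the two alarm IRQs by index. */
static int example_sun6i_rtc_get_irqs(struct platform_device *pdev)
{
        int irq_alarm0 = platform_get_irq(pdev, 0);
        int irq_alarm1 = platform_get_irq(pdev, 1);

        if (irq_alarm0 < 0)
                return irq_alarm0;
        if (irq_alarm1 < 0)
                return irq_alarm1;

        return 0;
}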
diff --git a/Documentation/devicetree/bindings/soc/ti/keystone-navigator-dma.txt b/Documentation/devicetree/bindings/soc/ti/keystone-navigator-dma.txt
new file mode 100644
index 0000000..337c4ea
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/ti/keystone-navigator-dma.txt
@@ -0,0 +1,111 @@
+Keystone Navigator DMA Controller
+
+This document explains the device tree bindings for the packet DMA
+on Keystone devices. The Keystone Navigator DMA driver sets up the DMA
+channels and flows for the QMSS (Queue Manager SubSystem), which triggers
+the actual data movement across clients using destination queues. Client
+modules such as NETCP (Network Coprocessor), SRIO (Serial Rapid IO) and
+the CRYPTO engines each have their own instance of the DMA hardware. QMSS
+also has an internal packet DMA module which is used as an infrastructure
+DMA with zero copy.
+
+Navigator DMA cloud layout:
+ ------------------
+ | Navigator DMAs |
+ ------------------
+ |
+ |-> DMA instance #0
+ |
+ |-> DMA instance #1
+ .
+ .
+ |
+ |-> DMA instance #n
+
+Navigator DMA properties:
+Required properties:
+ - compatible: Should be "ti,keystone-navigator-dma"
+ - clocks: phandles to the dma instance clocks. There can be as many clock
+ handles as there are dma instances, and their order should match the
+ order of the dma instances.
+ - ti,navigator-cloud-address: Should contain the base addresses for the
+ multi-core navigator cloud; the number of addresses depends on the SoC
+ integration configuration. The navigator cloud global addresses are
+ programmed into the DMA, which uses them as the physical addresses to
+ reach the queue managers. Note that although these addresses point to
+ queue managers, they are relevant only from the DMA's perspective; the
+ QMSS may not use them, since it has a different address space view to
+ reach all of its components.
+
+DMA instance properties:
+Required properties:
+ - reg: Should contain register location and length of the following dma
+ register regions. Register regions should be specified in the following
+ order.
+ - Global control register region (global).
+ - Tx DMA channel configuration register region (txchan).
+ - Rx DMA channel configuration register region (rxchan).
+ - Tx DMA channel Scheduler configuration register region (txsched).
+ - Rx DMA flow configuration register region (rxflow).
+
+Optional properties:
+ - reg-names: Names for the register regions.
+ - ti,enable-all: Enable all DMA channels instead of having clients open only
+ the specific channels they need. This property is useful for the
+ userspace fast path case, where the Linux drivers enable the channels
+ used by the userland stack.
+ - ti,loop-back: To loopback Tx streaming I/F to Rx streaming I/F. Used for
+ infrastructure transfers.
+ - ti,rx-retry-timeout: Number of dma cycles to wait before retry on buffer
+ starvation.
+
+Example:
+
+ knav_dmas: knav_dmas@0 {
+ compatible = "ti,keystone-navigator-dma";
+ clocks = <&papllclk>, <&clkxge>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ ti,navigator-cloud-address = <0x23a80000 0x23a90000
+ 0x23aa0000 0x23ab0000>;
+
+ dma_gbe: dma_gbe@0 {
+ reg = <0x2004000 0x100>,
+ <0x2004400 0x120>,
+ <0x2004800 0x300>,
+ <0x2004c00 0x120>,
+ <0x2005000 0x400>;
+ reg-names = "global", "txchan", "rxchan",
+ "txsched", "rxflow";
+ };
+
+ dma_xgbe: dma_xgbe@0 {
+ reg = <0x2fa1000 0x100>,
+ <0x2fa1400 0x200>,
+ <0x2fa1800 0x200>,
+ <0x2fa1c00 0x200>,
+ <0x2fa2000 0x400>;
+ reg-names = "global", "txchan", "rxchan",
+ "txsched", "rxflow";
+ };
+ };
+
+Navigator DMA client:
+Required properties:
+ - ti,navigator-dmas: List of one or more DMA specifiers, each consisting of
+ - A phandle pointing to DMA instance node
+ - A DMA channel number as a phandle arg.
+ - ti,navigator-dma-names: Contains dma channel name for each DMA specifier in
+ the 'ti,navigator-dmas' property.
+
+Example:
+
+ netcp: netcp@2090000 {
+ ..
+ ti,navigator-dmas = <&dma_gbe 22>,
+ <&dma_gbe 23>,
+ <&dma_gbe 8>;
+ ti,navigator-dma-names = "netrx0", "netrx1", "nettx";
+ ..
+ };
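[Editor's note, not part of the patch: for illustration only, a client could map one of its 'ti,navigator-dma-names' entries to the channel number carried in the matching 'ti,navigator-dmas' specifier with the standard OF helpers, roughly as below. The helper name is hypothetical and the in-tree Keystone Navigator code is not claimed to do it this way.]

#include <linux/of.h>

/* Hypothetical helper: resolve a named channel to its channel number. */
static int example_get_navigator_chan(struct device_node *np,
                                      const char *name, u32 *chan_num)
{
        struct of_phandle_args args;
        int index, ret;

        index = of_property_match_string(np, "ti,navigator-dma-names", name);
        if (index < 0)
                return index;

        /* Each specifier is <&dma_instance channel-number>, i.e. one arg cell. */
        ret = of_parse_phandle_with_fixed_args(np, "ti,navigator-dmas", 1,
                                               index, &args);
        if (ret)
                return ret;

        *chan_num = args.args[0];
        of_node_put(args.np);   /* args.np is the DMA instance node */

        return 0;
}

Against the netcp example above, looking up "nettx" would resolve to channel 8 on the dma_gbe instance.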
diff --git a/Documentation/devicetree/bindings/soc/ti/keystone-navigator-qmss.txt b/Documentation/devicetree/bindings/soc/ti/keystone-navigator-qmss.txt
new file mode 100644
index 0000000..d8e8cdb
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/ti/keystone-navigator-qmss.txt
@@ -0,0 +1,232 @@
+* Texas Instruments Keystone Navigator Queue Management SubSystem driver
+
+The QMSS (Queue Manager Sub System) found on Keystone SoCs is one of
+the main hardware sub systems that form the backbone of the Keystone
+multi-core Navigator. QMSS consists of queue managers, packed-data structure
+processors (PDSPs), linking RAM, descriptor pools and infrastructure
+Packet DMA.
+The Queue Manager is a hardware module that is responsible for accelerating
+management of the packet queues. Packets are queued/de-queued by writing or
+reading a descriptor address to/from a particular memory mapped location. The
+PDSPs perform QMSS related functions like accumulation, QoS, or event
+management. Linking RAM registers are used to link the descriptors, which are
+stored in descriptor RAM. Descriptor RAM is configurable as internal or
+external memory. The QMSS driver manages the PDSP setup, linking RAM regions,
+queue pool management (allocation, push, pop and notify) and descriptor
+pool management.
+
+
+Required properties:
+- compatible : Must be "ti,keystone-navigator-qmss";
+- clocks : phandle to the reference clock for this device.
+- queue-range : <start number> total range of queue numbers for the device.
+- linkram0 : <address size> for internal link ram, where size is the total
+ number of link ram entries.
+- linkram1 : <address size> for external link ram, where size is the total
+ number of external link ram entries. If the address is specified as
+ "0", the driver will allocate the memory.
+- qmgrs : child node describing the individual queue managers on the
+ SoC. On keystone 1 devices there should be only one node.
+ On keystone 2 devices there can be more than 1 node.
+ -- managed-queues : the actual queues managed by each queue manager
+ instance, specified as <"base queue #" "# of queues">.
+ -- reg : Address and size of the register set for the device.
+ Register regions should be specified in the following
+ order
+ - Queue Peek region.
+ - Queue status RAM.
+ - Queue configuration region.
+ - Descriptor memory setup region.
+ - Queue Management/Queue Proxy region for queue Push.
+ - Queue Management/Queue Proxy region for queue Pop.
+- queue-pools : child node classifying the queue ranges into pools.
+ Queue ranges are grouped into 3 types of pools:
+ - qpend : pool of qpend (interruptible) queues
+ - general-purpose : pool of general queues, primarily used
+ as free descriptor queues or
+ transmit DMA queues.
+ - accumulator : pool of queues on a PDSP accumulator channel
+ Each range can have the following properties:
+ -- qrange : number of queues to use per queue range, specified as
+ <"base queue #" "# of queues">.
+ -- interrupts : Optional property to specify the interrupt mapping
+ for interruptible queues. The driver additionally sets
+ the interrupt affinity hint based on the cpu mask.
+ -- qalloc-by-id : Optional property to specify that the queues in this
+ range can only be allocated by queue id.
+ -- accumulator : Accumulator channel specification. Any of the PDSPs in
+ QMSS can be loaded with the accumulator firmware. The
+ accumulator firmware's job is to poll a select number of
+ queues looking for descriptors that have been pushed
+ into them. Descriptors are popped from the queue and
+ placed in a buffer provided by the host. When the list
+ becomes full or a programmed time period expires, the
+ accumulator triggers an interrupt to the host to read
+ the buffer for descriptor information. This firmware
+ comes in 16, 32, and 48 channel builds. Each of these
+ channels can be configured to monitor 32 contiguous
+ queues. Accumulator channel property is specified as:
+ <pdsp-id, channel, entries, pacing mode, latency>
+ pdsp-id : QMSS PDSP running accumulator firmware
+ on which the channel has to be
+ configured
+ channel : Accumulator channel number
+ entries : Size of the accumulator descriptor list
+ pacing mode : Interrupt pacing mode
+ 0 : None, i.e. interrupt on list full only
+ 1 : Time delay since last interrupt
+ 2 : Time delay since first new packet
+ 3 : Time delay since last new packet
+ latency : time to delay the interrupt, specified
+ in microseconds.
+ -- multi-queue : Optional property to specify that the channel has to
+ monitor up to 32 queues starting at the base queue #.
+- descriptor-regions : child node describing the memory regions for keystone
+ navigator packet DMA descriptors. The memory for
+ descriptors will be allocated by the driver.
+ -- id : region number in QMSS.
+ -- region-spec : specifies the number of descriptors in the
+ region, specified as
+ <"# of descriptors" "descriptor size">.
+ -- link-index : start index, i.e. index of the first
+ descriptor in the region.
+
+Optional properties:
+- dma-coherent : Present if DMA operations are coherent.
+- pdsps : child node describing the PDSP configuration.
+ -- firmware : firmware to be loaded on the PDSP.
+ -- id : the qmss pdsp that will run the firmware.
+ -- reg : Address and size of the register set for the PDSP.
+ Register regions should be specified in the following
+ order
+ - PDSP internal RAM region.
+ - PDSP control/status region registers.
+ - QMSS interrupt distributor registers.
+ - PDSP command interface region.
+
+Example:
+
+qmss: qmss@2a40000 {
+ compatible = "ti,keystone-qmss";
+ dma-coherent;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&chipclk13>;
+ ranges;
+ queue-range = <0 0x4000>;
+ linkram0 = <0x100000 0x8000>;
+ linkram1 = <0x0 0x10000>;
+
+ qmgrs {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ qmgr0 {
+ managed-queues = <0 0x2000>;
+ reg = <0x2a40000 0x20000>,
+ <0x2a06000 0x400>,
+ <0x2a02000 0x1000>,
+ <0x2a03000 0x1000>,
+ <0x23a80000 0x20000>,
+ <0x2a80000 0x20000>;
+ };
+
+ qmgr1 {
+ managed-queues = <0x2000 0x2000>;
+ reg = <0x2a60000 0x20000>,
+ <0x2a06400 0x400>,
+ <0x2a04000 0x1000>,
+ <0x2a05000 0x1000>,
+ <0x23aa0000 0x20000>,
+ <0x2aa0000 0x20000>;
+ };
+ };
+ queue-pools {
+ qpend {
+ qpend-0 {
+ qrange = <658 8>;
+ interrupts =<0 40 0xf04 0 41 0xf04 0 42 0xf04
+ 0 43 0xf04 0 44 0xf04 0 45 0xf04
+ 0 46 0xf04 0 47 0xf04>;
+ };
+ qpend-1 {
+ qrange = <8704 16>;
+ interrupts = <0 48 0xf04 0 49 0xf04 0 50 0xf04
+ 0 51 0xf04 0 52 0xf04 0 53 0xf04
+ 0 54 0xf04 0 55 0xf04 0 56 0xf04
+ 0 57 0xf04 0 58 0xf04 0 59 0xf04
+ 0 60 0xf04 0 61 0xf04 0 62 0xf04
+ 0 63 0xf04>;
+ qalloc-by-id;
+ };
+ qpend-2 {
+ qrange = <8720 16>;
+ interrupts = <0 64 0xf04 0 65 0xf04 0 66 0xf04
+ 0 59 0xf04 0 68 0xf04 0 69 0xf04
+ 0 70 0xf04 0 71 0xf04 0 72 0xf04
+ 0 73 0xf04 0 74 0xf04 0 75 0xf04
+ 0 76 0xf04 0 77 0xf04 0 78 0xf04
+ 0 79 0xf04>;
+ };
+ };
+ general-purpose {
+ gp-0 {
+ qrange = <4000 64>;
+ };
+ netcp-tx {
+ qrange = <640 9>;
+ qalloc-by-id;
+ };
+ };
+ accumulator {
+ acc-0 {
+ qrange = <128 32>;
+ accumulator = <0 36 16 2 50>;
+ interrupts = <0 215 0xf01>;
+ multi-queue;
+ qalloc-by-id;
+ };
+ acc-1 {
+ qrange = <160 32>;
+ accumulator = <0 37 16 2 50>;
+ interrupts = <0 216 0xf01>;
+ multi-queue;
+ };
+ acc-2 {
+ qrange = <192 32>;
+ accumulator = <0 38 16 2 50>;
+ interrupts = <0 217 0xf01>;
+ multi-queue;
+ };
+ acc-3 {
+ qrange = <224 32>;
+ accumulator = <0 39 16 2 50>;
+ interrupts = <0 218 0xf01>;
+ multi-queue;
+ };
+ };
+ };
+ descriptor-regions {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ region-12 {
+ id = <12>;
+ region-spec = <8192 128>; /* num_desc desc_size */
+ link-index = <0x4000>;
+ };
+ };
+ pdsps {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ pdsp0@0x2a10000 {
+ firmware = "keystone/qmss_pdsp_acc48_k2_le_1_0_0_8.fw";
+ reg = <0x2a10000 0x1000>,
+ <0x2a0f000 0x100>,
+ <0x2a0c000 0x3c8>,
+ <0x2a20000 0x4000>;
+ id = <0>;
+ };
+ };
+}; /* qmss */
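[Editor's note, not part of the patch: a hypothetical sketch of decoding the 5-cell 'accumulator' specification documented above. The structure and names are illustrative and are not taken from the QMSS driver.]

#include <linux/of.h>

/* Illustrative container for <pdsp-id channel entries pacing-mode latency>. */
struct example_acc_cfg {
        u32 pdsp_id;            /* PDSP running the accumulator firmware */
        u32 channel;            /* accumulator channel number */
        u32 entries;            /* size of the accumulator descriptor list */
        u32 pacing_mode;        /* 0 = list full, 1-3 = time-delay variants */
        u32 latency_us;         /* interrupt delay in microseconds */
};

static int example_parse_accumulator(struct device_node *range,
                                     struct example_acc_cfg *cfg)
{
        u32 cells[5];
        int ret;

        ret = of_property_read_u32_array(range, "accumulator", cells, 5);
        if (ret)
                return ret;

        cfg->pdsp_id = cells[0];
        cfg->channel = cells[1];
        cfg->entries = cells[2];
        cfg->pacing_mode = cells[3];
        cfg->latency_us = cells[4];

        return 0;
}

For the acc-0 node above (accumulator = <0 36 16 2 50>), this would yield PDSP 0, channel 36, 16 entries, pacing mode 2 and a latency of 50 microseconds.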
diff --git a/MAINTAINERS b/MAINTAINERS
index cf24bb5..08f6277 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1381,6 +1381,11 @@
S: Maintained
F: drivers/clk/socfpga/
+ARM/SOCFPGA EDAC SUPPORT
+M: Thor Thayer <tthayer@opensource.altera.com>
+S: Maintained
+F: drivers/edac/altera_edac.
+
ARM/STI ARCHITECTURE
M: Srinivas Kandagatla <srinivas.kandagatla@gmail.com>
M: Maxime Coquelin <maxime.coquelin@st.com>
@@ -9133,6 +9138,15 @@
F: drivers/mmc/host/tifm_sd.c
F: include/linux/tifm.h
+TI KEYSTONE MULTICORE NAVIGATOR DRIVERS
+M: Santosh Shilimkar <santosh.shilimkar@ti.com>
+L: linux-kernel@vger.kernel.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: drivers/soc/ti/*
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
+
+
TI LM49xxx FAMILY ASoC CODEC DRIVERS
M: M R Swami Reddy <mr.swami.reddy@ti.com>
M: Vishwas A Deshpande <vishwas.a.deshpande@ti.com>
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index bb23c2d..840958b 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -345,10 +345,14 @@
};
};
- ramc: ramc@ffffe200 {
+ ramc0: ramc@ffffe200 {
compatible = "atmel,at91sam9260-sdramc";
- reg = <0xffffe200 0x200
- 0xffffe800 0x200>;
+ reg = <0xffffe200 0x200>;
+ };
+
+ ramc1: ramc@ffffe800 {
+ compatible = "atmel,at91sam9260-sdramc";
+ reg = <0xffffe800 0x200>;
};
pit: timer@fffffd30 {
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index 932a669..857fd3e 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -96,8 +96,14 @@
ramc0: ramc@ffffe400 {
compatible = "atmel,at91sam9g45-ddramc";
- reg = <0xffffe400 0x200
- 0xffffe600 0x200>;
+ reg = <0xffffe400 0x200>;
+ clocks = <&ddrck>;
+ clock-names = "ddrck";
+ };
+
+ ramc1: ramc@ffffe600 {
+ compatible = "atmel,at91sam9g45-ddramc";
+ reg = <0xffffe600 0x200>;
clocks = <&ddrck>;
clock-names = "ddrck";
};
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index 2bfac31..68eb9ad 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -87,6 +87,8 @@
ramc0: ramc@ffffe800 {
compatible = "atmel,at91sam9g45-ddramc";
reg = <0xffffe800 0x200>;
+ clocks = <&ddrck>;
+ clock-names = "ddrck";
};
pmc: pmc@fffffc00 {
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index e1a5c70..765a1e3 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -95,6 +95,8 @@
ramc0: ramc@ffffe800 {
compatible = "atmel,at91sam9g45-ddramc";
reg = <0xffffe800 0x200>;
+ clocks = <&ddrck>;
+ clock-names = "ddrck";
};
pmc: pmc@fffffc00 {
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
index 45013b8..7702a0d 100644
--- a/arch/arm/boot/dts/sama5d3.dtsi
+++ b/arch/arm/boot/dts/sama5d3.dtsi
@@ -402,8 +402,10 @@
};
ramc0: ramc@ffffea00 {
- compatible = "atmel,at91sam9g45-ddramc";
+ compatible = "atmel,sama5d3-ddramc";
reg = <0xffffea00 0x200>;
+ clocks = <&ddrck>, <&mpddr_clk>;
+ clock-names = "ddrck", "mpddr";
};
dbgu: serial@ffffee00 {
@@ -1170,6 +1172,11 @@
#clock-cells = <0>;
reg = <48>;
};
+
+ mpddr_clk: mpddr_clk {
+ #clock-cells = <0>;
+ reg = <49>;
+ };
};
};
@@ -1178,6 +1185,11 @@
reg = <0xfffffe00 0x10>;
};
+ shutdown-controller@fffffe10 {
+ compatible = "atmel,at91sam9x5-shdwc";
+ reg = <0xfffffe10 0x10>;
+ };
+
pit: timer@fffffe30 {
compatible = "atmel,at91sam9260-pit";
reg = <0xfffffe30 0xf>;
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 6cc6f7a..474c855 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -28,36 +28,30 @@
bool
default AT91_PMC_UNIT && AT91_USE_OLD_CLK
-config AT91_SAM9_ALT_RESET
+config OLD_IRQ_AT91
bool
- default !ARCH_AT91X40
-
-config AT91_SAM9G45_RESET
- bool
- default !ARCH_AT91X40
-
-config AT91_SAM9_TIME
- bool
+ select MULTI_IRQ_HANDLER
+ select SPARSE_IRQ
config HAVE_AT91_SMD
bool
config SOC_AT91SAM9
bool
- select AT91_SAM9_TIME
+ select ATMEL_AIC_IRQ if !OLD_IRQ_AT91
select CPU_ARM926T
select GENERIC_CLOCKEVENTS
- select MULTI_IRQ_HANDLER
- select SPARSE_IRQ
+ select MEMORY if USE_OF
+ select ATMEL_SDRAMC if USE_OF
config SOC_SAMA5
bool
- select AT91_SAM9_TIME
+ select ATMEL_AIC5_IRQ
select CPU_V7
select GENERIC_CLOCKEVENTS
- select MULTI_IRQ_HANDLER
- select SPARSE_IRQ
select USE_OF
+ select MEMORY
+ select ATMEL_SDRAMC
menu "Atmel AT91 System-on-Chip"
@@ -70,8 +64,7 @@
depends on !MMU
select CPU_ARM7TDMI
select ARCH_USES_GETTIMEOFFSET
- select MULTI_IRQ_HANDLER
- select SPARSE_IRQ
+ select OLD_IRQ_AT91
help
Select this if you are using one of Atmel's AT91X40 SoC.
@@ -108,11 +101,10 @@
if SOC_SAM_V4_V5
config SOC_AT91RM9200
bool "AT91RM9200"
+ select ATMEL_AIC_IRQ if !OLD_IRQ_AT91
select CPU_ARM920T
select GENERIC_CLOCKEVENTS
select HAVE_AT91_DBGU0
- select MULTI_IRQ_HANDLER
- select SPARSE_IRQ
select HAVE_AT91_USB_CLK
config SOC_AT91SAM9260
diff --git a/arch/arm/mach-at91/Kconfig.non_dt b/arch/arm/mach-at91/Kconfig.non_dt
index 44ace32..b774c3d 100644
--- a/arch/arm/mach-at91/Kconfig.non_dt
+++ b/arch/arm/mach-at91/Kconfig.non_dt
@@ -14,31 +14,37 @@
bool "AT91RM9200"
select SOC_AT91RM9200
select AT91_USE_OLD_CLK
+ select OLD_IRQ_AT91
config ARCH_AT91SAM9260
bool "AT91SAM9260 or AT91SAM9XE or AT91SAM9G20"
select SOC_AT91SAM9260
select AT91_USE_OLD_CLK
+ select OLD_IRQ_AT91
config ARCH_AT91SAM9261
bool "AT91SAM9261 or AT91SAM9G10"
select SOC_AT91SAM9261
select AT91_USE_OLD_CLK
+ select OLD_IRQ_AT91
config ARCH_AT91SAM9263
bool "AT91SAM9263"
select SOC_AT91SAM9263
select AT91_USE_OLD_CLK
+ select OLD_IRQ_AT91
config ARCH_AT91SAM9RL
bool "AT91SAM9RL"
select SOC_AT91SAM9RL
select AT91_USE_OLD_CLK
+ select OLD_IRQ_AT91
config ARCH_AT91SAM9G45
bool "AT91SAM9G45"
select SOC_AT91SAM9G45
select AT91_USE_OLD_CLK
+ select OLD_IRQ_AT91
endchoice
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index 78e9cec..61d04f9 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -2,15 +2,13 @@
# Makefile for the linux kernel.
#
-obj-y := irq.o gpio.o setup.o sysirq_mask.o
+obj-y := gpio.o setup.o sysirq_mask.o
obj-m :=
obj-n :=
obj- :=
+obj-$(CONFIG_OLD_IRQ_AT91) += irq.o
obj-$(CONFIG_OLD_CLK_AT91) += clock.o
-obj-$(CONFIG_AT91_SAM9_ALT_RESET) += at91sam9_alt_reset.o
-obj-$(CONFIG_AT91_SAM9G45_RESET) += at91sam9g45_reset.o
-obj-$(CONFIG_AT91_SAM9_TIME) += at91sam926x_time.o
obj-$(CONFIG_SOC_AT91SAM9) += sam9_smc.o
# CPU-specific support
diff --git a/arch/arm/mach-at91/at91_rstc.h b/arch/arm/mach-at91/at91_rstc.h
deleted file mode 100644
index a600e69..0000000
--- a/arch/arm/mach-at91/at91_rstc.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * arch/arm/mach-at91/include/mach/at91_rstc.h
- *
- * Copyright (C) 2007 Andrew Victor
- * Copyright (C) 2007 Atmel Corporation.
- *
- * Reset Controller (RSTC) - System peripherals regsters.
- * Based on AT91SAM9261 datasheet revision D.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef AT91_RSTC_H
-#define AT91_RSTC_H
-
-#ifndef __ASSEMBLY__
-extern void __iomem *at91_rstc_base;
-
-#define at91_rstc_read(field) \
- __raw_readl(at91_rstc_base + field)
-
-#define at91_rstc_write(field, value) \
- __raw_writel(value, at91_rstc_base + field)
-#else
-.extern at91_rstc_base
-#endif
-
-#define AT91_RSTC_CR 0x00 /* Reset Controller Control Register */
-#define AT91_RSTC_PROCRST (1 << 0) /* Processor Reset */
-#define AT91_RSTC_PERRST (1 << 2) /* Peripheral Reset */
-#define AT91_RSTC_EXTRST (1 << 3) /* External Reset */
-#define AT91_RSTC_KEY (0xa5 << 24) /* KEY Password */
-
-#define AT91_RSTC_SR 0x04 /* Reset Controller Status Register */
-#define AT91_RSTC_URSTS (1 << 0) /* User Reset Status */
-#define AT91_RSTC_RSTTYP (7 << 8) /* Reset Type */
-#define AT91_RSTC_RSTTYP_GENERAL (0 << 8)
-#define AT91_RSTC_RSTTYP_WAKEUP (1 << 8)
-#define AT91_RSTC_RSTTYP_WATCHDOG (2 << 8)
-#define AT91_RSTC_RSTTYP_SOFTWARE (3 << 8)
-#define AT91_RSTC_RSTTYP_USER (4 << 8)
-#define AT91_RSTC_NRSTL (1 << 16) /* NRST Pin Level */
-#define AT91_RSTC_SRCMP (1 << 17) /* Software Reset Command in Progress */
-
-#define AT91_RSTC_MR 0x08 /* Reset Controller Mode Register */
-#define AT91_RSTC_URSTEN (1 << 0) /* User Reset Enable */
-#define AT91_RSTC_URSTIEN (1 << 4) /* User Reset Interrupt Enable */
-#define AT91_RSTC_ERSTL (0xf << 8) /* External Reset Length */
-
-#endif
diff --git a/arch/arm/mach-at91/at91_shdwc.h b/arch/arm/mach-at91/at91_shdwc.h
deleted file mode 100644
index 9e29f31..0000000
--- a/arch/arm/mach-at91/at91_shdwc.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * arch/arm/mach-at91/include/mach/at91_shdwc.h
- *
- * Copyright (C) 2007 Andrew Victor
- * Copyright (C) 2007 Atmel Corporation.
- *
- * Shutdown Controller (SHDWC) - System peripherals regsters.
- * Based on AT91SAM9261 datasheet revision D.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef AT91_SHDWC_H
-#define AT91_SHDWC_H
-
-#ifndef __ASSEMBLY__
-extern void __iomem *at91_shdwc_base;
-
-#define at91_shdwc_read(field) \
- __raw_readl(at91_shdwc_base + field)
-
-#define at91_shdwc_write(field, value) \
- __raw_writel(value, at91_shdwc_base + field)
-#endif
-
-#define AT91_SHDW_CR 0x00 /* Shut Down Control Register */
-#define AT91_SHDW_SHDW (1 << 0) /* Shut Down command */
-#define AT91_SHDW_KEY (0xa5 << 24) /* KEY Password */
-
-#define AT91_SHDW_MR 0x04 /* Shut Down Mode Register */
-#define AT91_SHDW_WKMODE0 (3 << 0) /* Wake-up 0 Mode Selection */
-#define AT91_SHDW_WKMODE0_NONE 0
-#define AT91_SHDW_WKMODE0_HIGH 1
-#define AT91_SHDW_WKMODE0_LOW 2
-#define AT91_SHDW_WKMODE0_ANYLEVEL 3
-#define AT91_SHDW_CPTWK0_MAX 0xf /* Maximum Counter On Wake Up 0 */
-#define AT91_SHDW_CPTWK0 (AT91_SHDW_CPTWK0_MAX << 4) /* Counter On Wake Up 0 */
-#define AT91_SHDW_CPTWK0_(x) ((x) << 4)
-#define AT91_SHDW_RTTWKEN (1 << 16) /* Real Time Timer Wake-up Enable */
-#define AT91_SHDW_RTCWKEN (1 << 17) /* Real Time Clock Wake-up Enable */
-
-#define AT91_SHDW_SR 0x08 /* Shut Down Status Register */
-#define AT91_SHDW_WAKEUP0 (1 << 0) /* Wake-up 0 Status */
-#define AT91_SHDW_RTTWK (1 << 16) /* Real-time Timer Wake-up */
-#define AT91_SHDW_RTCWK (1 << 17) /* Real-time Clock Wake-up [SAM9RL] */
-
-#endif
diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c
index 3477ba9..aab1f96 100644
--- a/arch/arm/mach-at91/at91sam9260.c
+++ b/arch/arm/mach-at91/at91sam9260.c
@@ -11,6 +11,7 @@
*/
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/clk/at91_pmc.h>
#include <asm/proc-fns.h>
@@ -24,7 +25,6 @@
#include <mach/hardware.h>
#include "at91_aic.h"
-#include "at91_rstc.h"
#include "soc.h"
#include "generic.h"
#include "sam9_smc.h"
@@ -342,8 +342,6 @@
static void __init at91sam9260_ioremap_registers(void)
{
- at91_ioremap_shdwc(AT91SAM9260_BASE_SHDWC);
- at91_ioremap_rstc(AT91SAM9260_BASE_RSTC);
at91_ioremap_ramc(0, AT91SAM9260_BASE_SDRAMC, 512);
at91sam926x_ioremap_pit(AT91SAM9260_BASE_PIT);
at91sam9_ioremap_smc(0, AT91SAM9260_BASE_SMC);
@@ -354,7 +352,6 @@
static void __init at91sam9260_initialize(void)
{
arm_pm_idle = at91sam9_idle;
- arm_pm_restart = at91sam9_alt_restart;
at91_sysirq_mask_rtt(AT91SAM9260_BASE_RTT);
@@ -362,6 +359,45 @@
at91_gpio_init(at91sam9260_gpio, 3);
}
+static struct resource rstc_resources[] = {
+ [0] = {
+ .start = AT91SAM9260_BASE_RSTC,
+ .end = AT91SAM9260_BASE_RSTC + SZ_16 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91SAM9260_BASE_SDRAMC,
+ .end = AT91SAM9260_BASE_SDRAMC + SZ_512 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device rstc_device = {
+ .name = "at91-sam9260-reset",
+ .resource = rstc_resources,
+ .num_resources = ARRAY_SIZE(rstc_resources),
+};
+
+static struct resource shdwc_resources[] = {
+ [0] = {
+ .start = AT91SAM9260_BASE_SHDWC,
+ .end = AT91SAM9260_BASE_SHDWC + SZ_16 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device shdwc_device = {
+ .name = "at91-poweroff",
+ .resource = shdwc_resources,
+ .num_resources = ARRAY_SIZE(shdwc_resources),
+};
+
+static void __init at91sam9260_register_devices(void)
+{
+ platform_device_register(&rstc_device);
+ platform_device_register(&shdwc_device);
+}
+
/* --------------------------------------------------------------------
* Interrupt initialization
* -------------------------------------------------------------------- */
@@ -404,6 +440,11 @@
0, /* Advanced Interrupt Controller */
};
+static void __init at91sam9260_init_time(void)
+{
+ at91sam926x_pit_init(NR_IRQS_LEGACY + AT91_ID_SYS);
+}
+
AT91_SOC_START(at91sam9260)
.map_io = at91sam9260_map_io,
.default_irq_priority = at91sam9260_default_irq_priority,
@@ -411,5 +452,7 @@
| (1 << AT91SAM9260_ID_IRQ2),
.ioremap_registers = at91sam9260_ioremap_registers,
.register_clocks = at91sam9260_register_clocks,
+ .register_devices = at91sam9260_register_devices,
.init = at91sam9260_initialize,
+ .init_time = at91sam9260_init_time,
AT91_SOC_END
diff --git a/arch/arm/mach-at91/at91sam9261.c b/arch/arm/mach-at91/at91sam9261.c
index fb164a5..a8bd359 100644
--- a/arch/arm/mach-at91/at91sam9261.c
+++ b/arch/arm/mach-at91/at91sam9261.c
@@ -11,6 +11,7 @@
*/
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/clk/at91_pmc.h>
#include <asm/proc-fns.h>
@@ -23,7 +24,6 @@
#include <mach/hardware.h>
#include "at91_aic.h"
-#include "at91_rstc.h"
#include "soc.h"
#include "generic.h"
#include "sam9_smc.h"
@@ -301,8 +301,6 @@
static void __init at91sam9261_ioremap_registers(void)
{
- at91_ioremap_shdwc(AT91SAM9261_BASE_SHDWC);
- at91_ioremap_rstc(AT91SAM9261_BASE_RSTC);
at91_ioremap_ramc(0, AT91SAM9261_BASE_SDRAMC, 512);
at91sam926x_ioremap_pit(AT91SAM9261_BASE_PIT);
at91sam9_ioremap_smc(0, AT91SAM9261_BASE_SMC);
@@ -313,7 +311,6 @@
static void __init at91sam9261_initialize(void)
{
arm_pm_idle = at91sam9_idle;
- arm_pm_restart = at91sam9_alt_restart;
at91_sysirq_mask_rtt(AT91SAM9261_BASE_RTT);
@@ -321,6 +318,45 @@
at91_gpio_init(at91sam9261_gpio, 3);
}
+static struct resource rstc_resources[] = {
+ [0] = {
+ .start = AT91SAM9261_BASE_RSTC,
+ .end = AT91SAM9261_BASE_RSTC + SZ_16 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91SAM9261_BASE_SDRAMC,
+ .end = AT91SAM9261_BASE_SDRAMC + SZ_512 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device rstc_device = {
+ .name = "at91-sam9260-reset",
+ .resource = rstc_resources,
+ .num_resources = ARRAY_SIZE(rstc_resources),
+};
+
+static struct resource shdwc_resources[] = {
+ [0] = {
+ .start = AT91SAM9261_BASE_SHDWC,
+ .end = AT91SAM9261_BASE_SHDWC + SZ_16 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device shdwc_device = {
+ .name = "at91-poweroff",
+ .resource = shdwc_resources,
+ .num_resources = ARRAY_SIZE(shdwc_resources),
+};
+
+static void __init at91sam9261_register_devices(void)
+{
+ platform_device_register(&rstc_device);
+ platform_device_register(&shdwc_device);
+}
+
/* --------------------------------------------------------------------
* Interrupt initialization
* -------------------------------------------------------------------- */
@@ -363,6 +399,11 @@
0, /* Advanced Interrupt Controller */
};
+static void __init at91sam9261_init_time(void)
+{
+ at91sam926x_pit_init(NR_IRQS_LEGACY + AT91_ID_SYS);
+}
+
AT91_SOC_START(at91sam9261)
.map_io = at91sam9261_map_io,
.default_irq_priority = at91sam9261_default_irq_priority,
@@ -370,5 +411,7 @@
| (1 << AT91SAM9261_ID_IRQ2),
.ioremap_registers = at91sam9261_ioremap_registers,
.register_clocks = at91sam9261_register_clocks,
+ .register_devices = at91sam9261_register_devices,
.init = at91sam9261_initialize,
+ .init_time = at91sam9261_init_time,
AT91_SOC_END
diff --git a/arch/arm/mach-at91/at91sam9263.c b/arch/arm/mach-at91/at91sam9263.c
index 810fa5f..fbff228 100644
--- a/arch/arm/mach-at91/at91sam9263.c
+++ b/arch/arm/mach-at91/at91sam9263.c
@@ -11,6 +11,7 @@
*/
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/clk/at91_pmc.h>
#include <asm/proc-fns.h>
@@ -22,7 +23,6 @@
#include <mach/hardware.h>
#include "at91_aic.h"
-#include "at91_rstc.h"
#include "soc.h"
#include "generic.h"
#include "sam9_smc.h"
@@ -321,8 +321,6 @@
static void __init at91sam9263_ioremap_registers(void)
{
- at91_ioremap_shdwc(AT91SAM9263_BASE_SHDWC);
- at91_ioremap_rstc(AT91SAM9263_BASE_RSTC);
at91_ioremap_ramc(0, AT91SAM9263_BASE_SDRAMC0, 512);
at91_ioremap_ramc(1, AT91SAM9263_BASE_SDRAMC1, 512);
at91sam926x_ioremap_pit(AT91SAM9263_BASE_PIT);
@@ -335,7 +333,6 @@
static void __init at91sam9263_initialize(void)
{
arm_pm_idle = at91sam9_idle;
- arm_pm_restart = at91sam9_alt_restart;
at91_sysirq_mask_rtt(AT91SAM9263_BASE_RTT0);
at91_sysirq_mask_rtt(AT91SAM9263_BASE_RTT1);
@@ -344,6 +341,45 @@
at91_gpio_init(at91sam9263_gpio, 5);
}
+static struct resource rstc_resources[] = {
+ [0] = {
+ .start = AT91SAM9263_BASE_RSTC,
+ .end = AT91SAM9263_BASE_RSTC + SZ_16 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91SAM9263_BASE_SDRAMC0,
+ .end = AT91SAM9263_BASE_SDRAMC0 + SZ_512 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device rstc_device = {
+ .name = "at91-sam9260-reset",
+ .resource = rstc_resources,
+ .num_resources = ARRAY_SIZE(rstc_resources),
+};
+
+static struct resource shdwc_resources[] = {
+ [0] = {
+ .start = AT91SAM9263_BASE_SHDWC,
+ .end = AT91SAM9263_BASE_SHDWC + SZ_16 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device shdwc_device = {
+ .name = "at91-poweroff",
+ .resource = shdwc_resources,
+ .num_resources = ARRAY_SIZE(shdwc_resources),
+};
+
+static void __init at91sam9263_register_devices(void)
+{
+ platform_device_register(&rstc_device);
+ platform_device_register(&shdwc_device);
+}
+
/* --------------------------------------------------------------------
* Interrupt initialization
* -------------------------------------------------------------------- */
@@ -386,11 +422,18 @@
0, /* Advanced Interrupt Controller (IRQ1) */
};
+static void __init at91sam9263_init_time(void)
+{
+ at91sam926x_pit_init(NR_IRQS_LEGACY + AT91_ID_SYS);
+}
+
AT91_SOC_START(at91sam9263)
.map_io = at91sam9263_map_io,
.default_irq_priority = at91sam9263_default_irq_priority,
.extern_irq = (1 << AT91SAM9263_ID_IRQ0) | (1 << AT91SAM9263_ID_IRQ1),
.ioremap_registers = at91sam9263_ioremap_registers,
.register_clocks = at91sam9263_register_clocks,
+ .register_devices = at91sam9263_register_devices,
.init = at91sam9263_initialize,
+ .init_time = at91sam9263_init_time,
AT91_SOC_END
diff --git a/arch/arm/mach-at91/at91sam926x_time.c b/arch/arm/mach-at91/at91sam926x_time.c
deleted file mode 100644
index 0a9e2fc..0000000
--- a/arch/arm/mach-at91/at91sam926x_time.c
+++ /dev/null
@@ -1,294 +0,0 @@
-/*
- * at91sam926x_time.c - Periodic Interval Timer (PIT) for at91sam926x
- *
- * Copyright (C) 2005-2006 M. Amine SAYA, ATMEL Rousset, France
- * Revision 2005 M. Nicolas Diremdjian, ATMEL Rousset, France
- * Converted to ClockSource/ClockEvents by David Brownell.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/clockchips.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-
-#include <asm/mach/time.h>
-#include <mach/hardware.h>
-
-#define AT91_PIT_MR 0x00 /* Mode Register */
-#define AT91_PIT_PITIEN (1 << 25) /* Timer Interrupt Enable */
-#define AT91_PIT_PITEN (1 << 24) /* Timer Enabled */
-#define AT91_PIT_PIV (0xfffff) /* Periodic Interval Value */
-
-#define AT91_PIT_SR 0x04 /* Status Register */
-#define AT91_PIT_PITS (1 << 0) /* Timer Status */
-
-#define AT91_PIT_PIVR 0x08 /* Periodic Interval Value Register */
-#define AT91_PIT_PIIR 0x0c /* Periodic Interval Image Register */
-#define AT91_PIT_PICNT (0xfff << 20) /* Interval Counter */
-#define AT91_PIT_CPIV (0xfffff) /* Inverval Value */
-
-#define PIT_CPIV(x) ((x) & AT91_PIT_CPIV)
-#define PIT_PICNT(x) (((x) & AT91_PIT_PICNT) >> 20)
-
-static u32 pit_cycle; /* write-once */
-static u32 pit_cnt; /* access only w/system irq blocked */
-static void __iomem *pit_base_addr __read_mostly;
-static struct clk *mck;
-
-static inline unsigned int pit_read(unsigned int reg_offset)
-{
- return __raw_readl(pit_base_addr + reg_offset);
-}
-
-static inline void pit_write(unsigned int reg_offset, unsigned long value)
-{
- __raw_writel(value, pit_base_addr + reg_offset);
-}
-
-/*
- * Clocksource: just a monotonic counter of MCK/16 cycles.
- * We don't care whether or not PIT irqs are enabled.
- */
-static cycle_t read_pit_clk(struct clocksource *cs)
-{
- unsigned long flags;
- u32 elapsed;
- u32 t;
-
- raw_local_irq_save(flags);
- elapsed = pit_cnt;
- t = pit_read(AT91_PIT_PIIR);
- raw_local_irq_restore(flags);
-
- elapsed += PIT_PICNT(t) * pit_cycle;
- elapsed += PIT_CPIV(t);
- return elapsed;
-}
-
-static struct clocksource pit_clk = {
- .name = "pit",
- .rating = 175,
- .read = read_pit_clk,
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-
-/*
- * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16)
- */
-static void
-pit_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev)
-{
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- /* update clocksource counter */
- pit_cnt += pit_cycle * PIT_PICNT(pit_read(AT91_PIT_PIVR));
- pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN
- | AT91_PIT_PITIEN);
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- BUG();
- /* FALLTHROUGH */
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_UNUSED:
- /* disable irq, leaving the clocksource active */
- pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN);
- break;
- case CLOCK_EVT_MODE_RESUME:
- break;
- }
-}
-
-static void at91sam926x_pit_suspend(struct clock_event_device *cedev)
-{
- /* Disable timer */
- pit_write(AT91_PIT_MR, 0);
-}
-
-static void at91sam926x_pit_reset(void)
-{
- /* Disable timer and irqs */
- pit_write(AT91_PIT_MR, 0);
-
- /* Clear any pending interrupts, wait for PIT to stop counting */
- while (PIT_CPIV(pit_read(AT91_PIT_PIVR)) != 0)
- cpu_relax();
-
- /* Start PIT but don't enable IRQ */
- pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN);
-}
-
-static void at91sam926x_pit_resume(struct clock_event_device *cedev)
-{
- at91sam926x_pit_reset();
-}
-
-static struct clock_event_device pit_clkevt = {
- .name = "pit",
- .features = CLOCK_EVT_FEAT_PERIODIC,
- .shift = 32,
- .rating = 100,
- .set_mode = pit_clkevt_mode,
- .suspend = at91sam926x_pit_suspend,
- .resume = at91sam926x_pit_resume,
-};
-
-
-/*
- * IRQ handler for the timer.
- */
-static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id)
-{
- /*
- * irqs should be disabled here, but as the irq is shared they are only
- * guaranteed to be off if the timer irq is registered first.
- */
- WARN_ON_ONCE(!irqs_disabled());
-
- /* The PIT interrupt may be disabled, and is shared */
- if ((pit_clkevt.mode == CLOCK_EVT_MODE_PERIODIC)
- && (pit_read(AT91_PIT_SR) & AT91_PIT_PITS)) {
- unsigned nr_ticks;
-
- /* Get number of ticks performed before irq, and ack it */
- nr_ticks = PIT_PICNT(pit_read(AT91_PIT_PIVR));
- do {
- pit_cnt += pit_cycle;
- pit_clkevt.event_handler(&pit_clkevt);
- nr_ticks--;
- } while (nr_ticks);
-
- return IRQ_HANDLED;
- }
-
- return IRQ_NONE;
-}
-
-static struct irqaction at91sam926x_pit_irq = {
- .name = "at91_tick",
- .flags = IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
- .handler = at91sam926x_pit_interrupt,
- .irq = NR_IRQS_LEGACY + AT91_ID_SYS,
-};
-
-#ifdef CONFIG_OF
-static struct of_device_id pit_timer_ids[] = {
- { .compatible = "atmel,at91sam9260-pit" },
- { /* sentinel */ }
-};
-
-static int __init of_at91sam926x_pit_init(void)
-{
- struct device_node *np;
- int ret;
-
- np = of_find_matching_node(NULL, pit_timer_ids);
- if (!np)
- goto err;
-
- pit_base_addr = of_iomap(np, 0);
- if (!pit_base_addr)
- goto node_err;
-
- mck = of_clk_get(np, 0);
-
- /* Get the interrupts property */
- ret = irq_of_parse_and_map(np, 0);
- if (!ret) {
- pr_crit("AT91: PIT: Unable to get IRQ from DT\n");
- if (!IS_ERR(mck))
- clk_put(mck);
- goto ioremap_err;
- }
- at91sam926x_pit_irq.irq = ret;
-
- of_node_put(np);
-
- return 0;
-
-ioremap_err:
- iounmap(pit_base_addr);
-node_err:
- of_node_put(np);
-err:
- return -EINVAL;
-}
-#else
-static int __init of_at91sam926x_pit_init(void)
-{
- return -EINVAL;
-}
-#endif
-
-/*
- * Set up both clocksource and clockevent support.
- */
-void __init at91sam926x_pit_init(void)
-{
- unsigned long pit_rate;
- unsigned bits;
- int ret;
-
- mck = ERR_PTR(-ENOENT);
-
- /* For device tree enabled device: initialize here */
- of_at91sam926x_pit_init();
-
- /*
- * Use our actual MCK to figure out how many MCK/16 ticks per
- * 1/HZ period (instead of a compile-time constant LATCH).
- */
- if (IS_ERR(mck))
- mck = clk_get(NULL, "mck");
-
- if (IS_ERR(mck))
- panic("AT91: PIT: Unable to get mck clk\n");
- pit_rate = clk_get_rate(mck) / 16;
- pit_cycle = (pit_rate + HZ/2) / HZ;
- WARN_ON(((pit_cycle - 1) & ~AT91_PIT_PIV) != 0);
-
- /* Initialize and enable the timer */
- at91sam926x_pit_reset();
-
- /*
- * Register clocksource. The high order bits of PIV are unused,
- * so this isn't a 32-bit counter unless we get clockevent irqs.
- */
- bits = 12 /* PICNT */ + ilog2(pit_cycle) /* PIV */;
- pit_clk.mask = CLOCKSOURCE_MASK(bits);
- clocksource_register_hz(&pit_clk, pit_rate);
-
- /* Set up irq handler */
- ret = setup_irq(at91sam926x_pit_irq.irq, &at91sam926x_pit_irq);
- if (ret)
- pr_crit("AT91: PIT: Unable to setup IRQ\n");
-
- /* Set up and register clockevents */
- pit_clkevt.mult = div_sc(pit_rate, NSEC_PER_SEC, pit_clkevt.shift);
- pit_clkevt.cpumask = cpumask_of(0);
- clockevents_register_device(&pit_clkevt);
-}
-
-void __init at91sam926x_ioremap_pit(u32 addr)
-{
-#if defined(CONFIG_OF)
- struct device_node *np =
- of_find_matching_node(NULL, pit_timer_ids);
-
- if (np) {
- of_node_put(np);
- return;
- }
-#endif
- pit_base_addr = ioremap(addr, 16);
-
- if (!pit_base_addr)
- panic("Impossible to ioremap PIT\n");
-}
diff --git a/arch/arm/mach-at91/at91sam9_alt_reset.S b/arch/arm/mach-at91/at91sam9_alt_reset.S
deleted file mode 100644
index f039538..0000000
--- a/arch/arm/mach-at91/at91sam9_alt_reset.S
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * reset AT91SAM9G20 as per errata
- *
- * (C) BitBox Ltd 2010
- *
- * unless the SDRAM is cleanly shutdown before we hit the
- * reset register it can be left driving the data bus and
- * killing the chance of a subsequent boot from NAND
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/linkage.h>
-#include <mach/hardware.h>
-#include <mach/at91_ramc.h>
-#include "at91_rstc.h"
-
- .arm
-
- .globl at91sam9_alt_restart
-
-at91sam9_alt_restart: ldr r0, =at91_ramc_base @ preload constants
- ldr r0, [r0]
- ldr r4, =at91_rstc_base
- ldr r1, [r4]
-
- mov r2, #1
- mov r3, #AT91_SDRAMC_LPCB_POWER_DOWN
- ldr r4, =AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST
-
- .balign 32 @ align to cache line
-
- str r2, [r0, #AT91_SDRAMC_TR] @ disable SDRAM access
- str r3, [r0, #AT91_SDRAMC_LPR] @ power down SDRAM
- str r4, [r1, #AT91_RSTC_CR] @ reset processor
-
- b .
diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c
index 9d45496..405427e 100644
--- a/arch/arm/mach-at91/at91sam9g45.c
+++ b/arch/arm/mach-at91/at91sam9g45.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/clk/at91_pmc.h>
+#include <linux/platform_device.h>
#include <asm/irq.h>
#include <asm/mach/arch.h>
@@ -371,8 +372,6 @@
static void __init at91sam9g45_ioremap_registers(void)
{
- at91_ioremap_shdwc(AT91SAM9G45_BASE_SHDWC);
- at91_ioremap_rstc(AT91SAM9G45_BASE_RSTC);
at91_ioremap_ramc(0, AT91SAM9G45_BASE_DDRSDRC1, 512);
at91_ioremap_ramc(1, AT91SAM9G45_BASE_DDRSDRC0, 512);
at91sam926x_ioremap_pit(AT91SAM9G45_BASE_PIT);
@@ -384,7 +383,6 @@
static void __init at91sam9g45_initialize(void)
{
arm_pm_idle = at91sam9_idle;
- arm_pm_restart = at91sam9g45_restart;
at91_sysirq_mask_rtc(AT91SAM9G45_BASE_RTC);
at91_sysirq_mask_rtt(AT91SAM9G45_BASE_RTT);
@@ -393,6 +391,50 @@
at91_gpio_init(at91sam9g45_gpio, 5);
}
+static struct resource rstc_resources[] = {
+ [0] = {
+ .start = AT91SAM9G45_BASE_RSTC,
+ .end = AT91SAM9G45_BASE_RSTC + SZ_16 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91SAM9G45_BASE_DDRSDRC1,
+ .end = AT91SAM9G45_BASE_DDRSDRC1 + SZ_512 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = {
+ .start = AT91SAM9G45_BASE_DDRSDRC0,
+ .end = AT91SAM9G45_BASE_DDRSDRC0 + SZ_512 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device rstc_device = {
+ .name = "at91-sam9g45-reset",
+ .resource = rstc_resources,
+ .num_resources = ARRAY_SIZE(rstc_resources),
+};
+
+static struct resource shdwc_resources[] = {
+ [0] = {
+ .start = AT91SAM9G45_BASE_SHDWC,
+ .end = AT91SAM9G45_BASE_SHDWC + SZ_16 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device shdwc_device = {
+ .name = "at91-poweroff",
+ .resource = shdwc_resources,
+ .num_resources = ARRAY_SIZE(shdwc_resources),
+};
+
+static void __init at91sam9g45_register_devices(void)
+{
+ platform_device_register(&rstc_device);
+ platform_device_register(&shdwc_device);
+}
+
/* --------------------------------------------------------------------
* Interrupt initialization
* -------------------------------------------------------------------- */
@@ -435,11 +477,18 @@
0, /* Advanced Interrupt Controller (IRQ0) */
};
+static void __init at91sam9g45_init_time(void)
+{
+ at91sam926x_pit_init(NR_IRQS_LEGACY + AT91_ID_SYS);
+}
+
AT91_SOC_START(at91sam9g45)
.map_io = at91sam9g45_map_io,
.default_irq_priority = at91sam9g45_default_irq_priority,
.extern_irq = (1 << AT91SAM9G45_ID_IRQ0),
.ioremap_registers = at91sam9g45_ioremap_registers,
.register_clocks = at91sam9g45_register_clocks,
+ .register_devices = at91sam9g45_register_devices,
.init = at91sam9g45_initialize,
+ .init_time = at91sam9g45_init_time,
AT91_SOC_END
diff --git a/arch/arm/mach-at91/at91sam9g45_reset.S b/arch/arm/mach-at91/at91sam9g45_reset.S
deleted file mode 100644
index c40c1e2..0000000
--- a/arch/arm/mach-at91/at91sam9g45_reset.S
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * reset AT91SAM9G45 as per errata
- *
- * Copyright (C) 2011 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcosoft.com>
- *
- * unless the SDRAM is cleanly shutdown before we hit the
- * reset register it can be left driving the data bus and
- * killing the chance of a subsequent boot from NAND
- *
- * GPLv2 Only
- */
-
-#include <linux/linkage.h>
-#include <mach/hardware.h>
-#include <mach/at91_ramc.h>
-#include "at91_rstc.h"
- .arm
-
-/*
- * at91_ramc_base is an array void*
- * init at NULL if only one DDR controler is present in or DT
- */
- .globl at91sam9g45_restart
-
-at91sam9g45_restart:
- ldr r5, =at91_ramc_base @ preload constants
- ldr r0, [r5]
- ldr r5, [r5, #4] @ ddr1
- cmp r5, #0
- ldr r4, =at91_rstc_base
- ldr r1, [r4]
-
- mov r2, #1
- mov r3, #AT91_DDRSDRC_LPCB_POWER_DOWN
- ldr r4, =AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST
-
- .balign 32 @ align to cache line
-
- strne r2, [r5, #AT91_DDRSDRC_RTR] @ disable DDR1 access
- strne r3, [r5, #AT91_DDRSDRC_LPR] @ power down DDR1
- str r2, [r0, #AT91_DDRSDRC_RTR] @ disable DDR0 access
- str r3, [r0, #AT91_DDRSDRC_LPR] @ power down DDR0
- str r4, [r1, #AT91_RSTC_CR] @ reset processor
-
- b .
diff --git a/arch/arm/mach-at91/at91sam9rl.c b/arch/arm/mach-at91/at91sam9rl.c
index 878d501..f553e4e 100644
--- a/arch/arm/mach-at91/at91sam9rl.c
+++ b/arch/arm/mach-at91/at91sam9rl.c
@@ -10,6 +10,7 @@
*/
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/clk/at91_pmc.h>
#include <asm/proc-fns.h>
@@ -23,7 +24,6 @@
#include <mach/hardware.h>
#include "at91_aic.h"
-#include "at91_rstc.h"
#include "soc.h"
#include "generic.h"
#include "sam9_smc.h"
@@ -311,8 +311,6 @@
static void __init at91sam9rl_ioremap_registers(void)
{
- at91_ioremap_shdwc(AT91SAM9RL_BASE_SHDWC);
- at91_ioremap_rstc(AT91SAM9RL_BASE_RSTC);
at91_ioremap_ramc(0, AT91SAM9RL_BASE_SDRAMC, 512);
at91sam926x_ioremap_pit(AT91SAM9RL_BASE_PIT);
at91sam9_ioremap_smc(0, AT91SAM9RL_BASE_SMC);
@@ -323,7 +321,6 @@
static void __init at91sam9rl_initialize(void)
{
arm_pm_idle = at91sam9_idle;
- arm_pm_restart = at91sam9_alt_restart;
at91_sysirq_mask_rtc(AT91SAM9RL_BASE_RTC);
at91_sysirq_mask_rtt(AT91SAM9RL_BASE_RTT);
@@ -332,6 +329,45 @@
at91_gpio_init(at91sam9rl_gpio, 4);
}
+static struct resource rstc_resources[] = {
+ [0] = {
+ .start = AT91SAM9RL_BASE_RSTC,
+ .end = AT91SAM9RL_BASE_RSTC + SZ_16 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91SAM9RL_BASE_SDRAMC,
+ .end = AT91SAM9RL_BASE_SDRAMC + SZ_512 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device rstc_device = {
+ .name = "at91-sam9260-reset",
+ .resource = rstc_resources,
+ .num_resources = ARRAY_SIZE(rstc_resources),
+};
+
+static struct resource shdwc_resources[] = {
+ [0] = {
+ .start = AT91SAM9RL_BASE_SHDWC,
+ .end = AT91SAM9RL_BASE_SHDWC + SZ_16 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device shdwc_device = {
+ .name = "at91-poweroff",
+ .resource = shdwc_resources,
+ .num_resources = ARRAY_SIZE(shdwc_resources),
+};
+
+static void __init at91sam9rl_register_devices(void)
+{
+ platform_device_register(&rstc_device);
+ platform_device_register(&shdwc_device);
+}
+
/* --------------------------------------------------------------------
* Interrupt initialization
* -------------------------------------------------------------------- */
@@ -374,6 +410,11 @@
0, /* Advanced Interrupt Controller */
};
+static void __init at91sam9rl_init_time(void)
+{
+ at91sam926x_pit_init(NR_IRQS_LEGACY + AT91_ID_SYS);
+}
+
AT91_SOC_START(at91sam9rl)
.map_io = at91sam9rl_map_io,
.default_irq_priority = at91sam9rl_default_irq_priority,
@@ -382,5 +423,7 @@
#if defined(CONFIG_OLD_CLK_AT91)
.register_clocks = at91sam9rl_register_clocks,
#endif
+ .register_devices = at91sam9rl_register_devices,
.init = at91sam9rl_initialize,
+ .init_time = at91sam9rl_init_time,
AT91_SOC_END
diff --git a/arch/arm/mach-at91/board-afeb-9260v1.c b/arch/arm/mach-at91/board-afeb-9260v1.c
index 597c649..e76e35c 100644
--- a/arch/arm/mach-at91/board-afeb-9260v1.c
+++ b/arch/arm/mach-at91/board-afeb-9260v1.c
@@ -167,6 +167,8 @@
static void __init afeb9260_board_init(void)
{
+ at91_register_devices();
+
/* Serial */
/* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
@@ -211,7 +213,7 @@
MACHINE_START(AFEB9260, "Custom afeb9260 board")
/* Maintainer: Sergey Lapin <slapin@ossfans.org> */
- .init_time = at91sam926x_pit_init,
+ .init_time = at91_init_time,
.map_io = at91_map_io,
.handle_irq = at91_aic_handle_irq,
.init_early = afeb9260_init_early,
diff --git a/arch/arm/mach-at91/board-cam60.c b/arch/arm/mach-at91/board-cam60.c
index a30502c..ae827dd 100644
--- a/arch/arm/mach-at91/board-cam60.c
+++ b/arch/arm/mach-at91/board-cam60.c
@@ -170,6 +170,8 @@
static void __init cam60_board_init(void)
{
+ at91_register_devices();
+
/* Serial */
/* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
@@ -188,7 +190,7 @@
MACHINE_START(CAM60, "KwikByte CAM60")
/* Maintainer: KwikByte */
- .init_time = at91sam926x_pit_init,
+ .init_time = at91_init_time,
.map_io = at91_map_io,
.handle_irq = at91_aic_handle_irq,
.init_early = cam60_init_early,
diff --git a/arch/arm/mach-at91/board-cpu9krea.c b/arch/arm/mach-at91/board-cpu9krea.c
index 2037f78..731c831 100644
--- a/arch/arm/mach-at91/board-cpu9krea.c
+++ b/arch/arm/mach-at91/board-cpu9krea.c
@@ -322,6 +322,8 @@
static void __init cpu9krea_board_init(void)
{
+ at91_register_devices();
+
/* NOR */
cpu9krea_add_device_nor();
/* Serial */
@@ -375,7 +377,7 @@
MACHINE_START(CPUAT9G20, "Eukrea CPU9G20")
#endif
/* Maintainer: Eric Benard - EUKREA Electromatique */
- .init_time = at91sam926x_pit_init,
+ .init_time = at91_init_time,
.map_io = at91_map_io,
.handle_irq = at91_aic_handle_irq,
.init_early = cpu9krea_init_early,
diff --git a/arch/arm/mach-at91/board-dt-rm9200.c b/arch/arm/mach-at91/board-dt-rm9200.c
index 3a185fa..61ea214 100644
--- a/arch/arm/mach-at91/board-dt-rm9200.c
+++ b/arch/arm/mach-at91/board-dt-rm9200.c
@@ -24,17 +24,6 @@
#include "at91_aic.h"
#include "generic.h"
-
-static const struct of_device_id irq_of_match[] __initconst = {
- { .compatible = "atmel,at91rm9200-aic", .data = at91_aic_of_init },
- { /*sentinel*/ }
-};
-
-static void __init at91rm9200_dt_init_irq(void)
-{
- of_irq_init(irq_of_match);
-}
-
static const char *at91rm9200_dt_board_compat[] __initdata = {
"atmel,at91rm9200",
NULL
@@ -43,8 +32,6 @@
DT_MACHINE_START(at91rm9200_dt, "Atmel AT91RM9200 (Device Tree)")
.init_time = at91rm9200_timer_init,
.map_io = at91_map_io,
- .handle_irq = at91_aic_handle_irq,
.init_early = at91rm9200_dt_initialize,
- .init_irq = at91rm9200_dt_init_irq,
.dt_compat = at91rm9200_dt_board_compat,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-dt-sam9.c b/arch/arm/mach-at91/board-dt-sam9.c
index 575b0be..d3048cc 100644
--- a/arch/arm/mach-at91/board-dt-sam9.c
+++ b/arch/arm/mach-at91/board-dt-sam9.c
@@ -25,26 +25,6 @@
#include "board.h"
#include "generic.h"
-
-static void __init sam9_dt_timer_init(void)
-{
-#if defined(CONFIG_COMMON_CLK)
- of_clk_init(NULL);
-#endif
- at91sam926x_pit_init();
-}
-
-static const struct of_device_id irq_of_match[] __initconst = {
-
- { .compatible = "atmel,at91rm9200-aic", .data = at91_aic_of_init },
- { /*sentinel*/ }
-};
-
-static void __init at91_dt_init_irq(void)
-{
- of_irq_init(irq_of_match);
-}
-
static const char *at91_dt_board_compat[] __initdata = {
"atmel,at91sam9",
NULL
@@ -52,10 +32,7 @@
DT_MACHINE_START(at91sam_dt, "Atmel AT91SAM (Device Tree)")
/* Maintainer: Atmel */
- .init_time = sam9_dt_timer_init,
.map_io = at91_map_io,
- .handle_irq = at91_aic_handle_irq,
.init_early = at91_dt_initialize,
- .init_irq = at91_dt_init_irq,
.dt_compat = at91_dt_board_compat,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-dt-sama5.c b/arch/arm/mach-at91/board-dt-sama5.c
index 075ec05..be6aa2e2 100644
--- a/arch/arm/mach-at91/board-dt-sama5.c
+++ b/arch/arm/mach-at91/board-dt-sama5.c
@@ -27,25 +27,6 @@
#include "at91_aic.h"
#include "generic.h"
-static void __init sama5_dt_timer_init(void)
-{
-#if defined(CONFIG_COMMON_CLK)
- of_clk_init(NULL);
-#endif
- at91sam926x_pit_init();
-}
-
-static const struct of_device_id irq_of_match[] __initconst = {
-
- { .compatible = "atmel,sama5d3-aic", .data = at91_aic5_of_init },
- { /*sentinel*/ }
-};
-
-static void __init at91_dt_init_irq(void)
-{
- of_irq_init(irq_of_match);
-}
-
static int ksz9021rn_phy_fixup(struct phy_device *phy)
{
int value;
@@ -80,11 +61,8 @@
DT_MACHINE_START(sama5_dt, "Atmel SAMA5 (Device Tree)")
/* Maintainer: Atmel */
- .init_time = sama5_dt_timer_init,
.map_io = at91_map_io,
- .handle_irq = at91_aic5_handle_irq,
.init_early = at91_dt_initialize,
- .init_irq = at91_dt_init_irq,
.init_machine = sama5_dt_device_init,
.dt_compat = sama5_dt_board_compat,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-flexibity.c b/arch/arm/mach-at91/board-flexibity.c
index 68f1ab6..a6aa4a2 100644
--- a/arch/arm/mach-at91/board-flexibity.c
+++ b/arch/arm/mach-at91/board-flexibity.c
@@ -138,6 +138,8 @@
static void __init flexibity_board_init(void)
{
+ at91_register_devices();
+
/* Serial */
/* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
@@ -160,7 +162,7 @@
MACHINE_START(FLEXIBITY, "Flexibity Connect")
/* Maintainer: Maxim Osipov */
- .init_time = at91sam926x_pit_init,
+ .init_time = at91_init_time,
.map_io = at91_map_io,
.handle_irq = at91_aic_handle_irq,
.init_early = flexibity_init_early,
diff --git a/arch/arm/mach-at91/board-foxg20.c b/arch/arm/mach-at91/board-foxg20.c
index 8b22c60..ec290b6 100644
--- a/arch/arm/mach-at91/board-foxg20.c
+++ b/arch/arm/mach-at91/board-foxg20.c
@@ -263,7 +263,7 @@
MACHINE_START(ACMENETUSFOXG20, "Acme Systems srl FOX Board G20")
/* Maintainer: Sergio Tanzilli */
- .init_time = at91sam926x_pit_init,
+ .init_time = at91_init_time,
.map_io = at91_map_io,
.handle_irq = at91_aic_handle_irq,
.init_early = foxg20_init_early,
diff --git a/arch/arm/mach-at91/board-gsia18s.c b/arch/arm/mach-at91/board-gsia18s.c
index b729dd12..bf5cc55c 100644
--- a/arch/arm/mach-at91/board-gsia18s.c
+++ b/arch/arm/mach-at91/board-gsia18s.c
@@ -576,7 +576,7 @@
}
MACHINE_START(GSIA18S, "GS_IA18_S")
- .init_time = at91sam926x_pit_init,
+ .init_time = at91_init_time,
.map_io = at91_map_io,
.handle_irq = at91_aic_handle_irq,
.init_early = gsia18s_init_early,
diff --git a/arch/arm/mach-at91/board-pcontrol-g20.c b/arch/arm/mach-at91/board-pcontrol-g20.c
index b48d95e..9c26b94 100644
--- a/arch/arm/mach-at91/board-pcontrol-g20.c
+++ b/arch/arm/mach-at91/board-pcontrol-g20.c
@@ -219,7 +219,7 @@
MACHINE_START(PCONTROL_G20, "PControl G20")
/* Maintainer: pgsellmann@portner-elektronik.at */
- .init_time = at91sam926x_pit_init,
+ .init_time = at91_init_time,
.map_io = at91_map_io,
.handle_irq = at91_aic_handle_irq,
.init_early = pcontrol_g20_init_early,
diff --git a/arch/arm/mach-at91/board-sam9-l9260.c b/arch/arm/mach-at91/board-sam9-l9260.c
index d24dda6..c2166e3 100644
--- a/arch/arm/mach-at91/board-sam9-l9260.c
+++ b/arch/arm/mach-at91/board-sam9-l9260.c
@@ -187,6 +187,8 @@
static void __init ek_board_init(void)
{
+ at91_register_devices();
+
/* Serial */
/* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
@@ -219,7 +221,7 @@
MACHINE_START(SAM9_L9260, "Olimex SAM9-L9260")
/* Maintainer: Olimex */
- .init_time = at91sam926x_pit_init,
+ .init_time = at91_init_time,
.map_io = at91_map_io,
.handle_irq = at91_aic_handle_irq,
.init_early = ek_init_early,
diff --git a/arch/arm/mach-at91/board-sam9260ek.c b/arch/arm/mach-at91/board-sam9260ek.c
index 65dea12..bf8a946 100644
--- a/arch/arm/mach-at91/board-sam9260ek.c
+++ b/arch/arm/mach-at91/board-sam9260ek.c
@@ -45,7 +45,6 @@
#include <mach/system_rev.h>
#include "at91_aic.h"
-#include "at91_shdwc.h"
#include "board.h"
#include "sam9_smc.h"
#include "generic.h"
@@ -307,6 +306,8 @@
static void __init ek_board_init(void)
{
+ at91_register_devices();
+
/* Serial */
/* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
@@ -344,7 +345,7 @@
MACHINE_START(AT91SAM9260EK, "Atmel AT91SAM9260-EK")
/* Maintainer: Atmel */
- .init_time = at91sam926x_pit_init,
+ .init_time = at91_init_time,
.map_io = at91_map_io,
.handle_irq = at91_aic_handle_irq,
.init_early = ek_init_early,
diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c
index 4637432..e85ada8 100644
--- a/arch/arm/mach-at91/board-sam9261ek.c
+++ b/arch/arm/mach-at91/board-sam9261ek.c
@@ -49,7 +49,6 @@
#include <mach/system_rev.h>
#include "at91_aic.h"
-#include "at91_shdwc.h"
#include "board.h"
#include "sam9_smc.h"
#include "generic.h"
@@ -561,6 +560,8 @@
static void __init ek_board_init(void)
{
+ at91_register_devices();
+
/* Serial */
/* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
@@ -603,7 +604,7 @@
MACHINE_START(AT91SAM9261EK, "Atmel AT91SAM9261-EK")
/* Maintainer: Atmel */
- .init_time = at91sam926x_pit_init,
+ .init_time = at91_init_time,
.map_io = at91_map_io,
.handle_irq = at91_aic_handle_irq,
.init_early = ek_init_early,
@@ -613,7 +614,7 @@
MACHINE_START(AT91SAM9G10EK, "Atmel AT91SAM9G10-EK")
/* Maintainer: Atmel */
- .init_time = at91sam926x_pit_init,
+ .init_time = at91_init_time,
.map_io = at91_map_io,
.handle_irq = at91_aic_handle_irq,
.init_early = ek_init_early,
diff --git a/arch/arm/mach-at91/board-sam9263ek.c b/arch/arm/mach-at91/board-sam9263ek.c
index fc44609..d76680f 100644
--- a/arch/arm/mach-at91/board-sam9263ek.c
+++ b/arch/arm/mach-at91/board-sam9263ek.c
@@ -50,7 +50,6 @@
#include <mach/system_rev.h>
#include "at91_aic.h"
-#include "at91_shdwc.h"
#include "board.h"
#include "sam9_smc.h"
#include "generic.h"
@@ -439,6 +438,8 @@
static void __init ek_board_init(void)
{
+ at91_register_devices();
+
/* Serial */
/* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
@@ -483,7 +484,7 @@
MACHINE_START(AT91SAM9263EK, "Atmel AT91SAM9263-EK")
/* Maintainer: Atmel */
- .init_time = at91sam926x_pit_init,
+ .init_time = at91_init_time,
.map_io = at91_map_io,
.handle_irq = at91_aic_handle_irq,
.init_early = ek_init_early,
diff --git a/arch/arm/mach-at91/board-sam9g20ek.c b/arch/arm/mach-at91/board-sam9g20ek.c
index e1be6e2..49f0752 100644
--- a/arch/arm/mach-at91/board-sam9g20ek.c
+++ b/arch/arm/mach-at91/board-sam9g20ek.c
@@ -410,7 +410,7 @@
MACHINE_START(AT91SAM9G20EK, "Atmel AT91SAM9G20-EK")
/* Maintainer: Atmel */
- .init_time = at91sam926x_pit_init,
+ .init_time = at91_init_time,
.map_io = at91_map_io,
.handle_irq = at91_aic_handle_irq,
.init_early = ek_init_early,
@@ -420,7 +420,7 @@
MACHINE_START(AT91SAM9G20EK_2MMC, "Atmel AT91SAM9G20-EK 2 MMC Slot Mod")
/* Maintainer: Atmel */
- .init_time = at91sam926x_pit_init,
+ .init_time = at91_init_time,
.map_io = at91_map_io,
.handle_irq = at91_aic_handle_irq,
.init_early = ek_init_early,
diff --git a/arch/arm/mach-at91/board-sam9m10g45ek.c b/arch/arm/mach-at91/board-sam9m10g45ek.c
index b227732..a517c7f 100644
--- a/arch/arm/mach-at91/board-sam9m10g45ek.c
+++ b/arch/arm/mach-at91/board-sam9m10g45ek.c
@@ -48,7 +48,6 @@
#include <mach/system_rev.h>
#include "at91_aic.h"
-#include "at91_shdwc.h"
#include "board.h"
#include "sam9_smc.h"
#include "generic.h"
@@ -471,6 +470,8 @@
static void __init ek_board_init(void)
{
+ at91_register_devices();
+
/* Serial */
/* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
@@ -517,7 +518,7 @@
MACHINE_START(AT91SAM9M10G45EK, "Atmel AT91SAM9M10G45-EK")
/* Maintainer: Atmel */
- .init_time = at91sam926x_pit_init,
+ .init_time = at91_init_time,
.map_io = at91_map_io,
.handle_irq = at91_aic_handle_irq,
.init_early = ek_init_early,
diff --git a/arch/arm/mach-at91/board-sam9rlek.c b/arch/arm/mach-at91/board-sam9rlek.c
index b64648b..8bca329 100644
--- a/arch/arm/mach-at91/board-sam9rlek.c
+++ b/arch/arm/mach-at91/board-sam9rlek.c
@@ -35,7 +35,6 @@
#include "at91_aic.h"
-#include "at91_shdwc.h"
#include "board.h"
#include "sam9_smc.h"
#include "generic.h"
@@ -292,6 +291,8 @@
static void __init ek_board_init(void)
{
+ at91_register_devices();
+
/* Serial */
/* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
@@ -323,7 +324,7 @@
MACHINE_START(AT91SAM9RLEK, "Atmel AT91SAM9RL-EK")
/* Maintainer: Atmel */
- .init_time = at91sam926x_pit_init,
+ .init_time = at91_init_time,
.map_io = at91_map_io,
.handle_irq = at91_aic_handle_irq,
.init_early = ek_init_early,
diff --git a/arch/arm/mach-at91/board-snapper9260.c b/arch/arm/mach-at91/board-snapper9260.c
index 1b870e6..b4aff84 100644
--- a/arch/arm/mach-at91/board-snapper9260.c
+++ b/arch/arm/mach-at91/board-snapper9260.c
@@ -154,6 +154,8 @@
static void __init snapper9260_board_init(void)
{
+ at91_register_devices();
+
at91_add_device_i2c(snapper9260_i2c_devices,
ARRAY_SIZE(snapper9260_i2c_devices));
@@ -178,7 +180,7 @@
}
MACHINE_START(SNAPPER_9260, "Bluewater Systems Snapper 9260/9G20 module")
- .init_time = at91sam926x_pit_init,
+ .init_time = at91_init_time,
.map_io = at91_map_io,
.handle_irq = at91_aic_handle_irq,
.init_early = snapper9260_init_early,
diff --git a/arch/arm/mach-at91/board-stamp9g20.c b/arch/arm/mach-at91/board-stamp9g20.c
index 3b57503..e825641 100644
--- a/arch/arm/mach-at91/board-stamp9g20.c
+++ b/arch/arm/mach-at91/board-stamp9g20.c
@@ -275,7 +275,7 @@
MACHINE_START(PORTUXG20, "taskit PortuxG20")
/* Maintainer: taskit GmbH */
- .init_time = at91sam926x_pit_init,
+ .init_time = at91_init_time,
.map_io = at91_map_io,
.handle_irq = at91_aic_handle_irq,
.init_early = stamp9g20_init_early,
@@ -285,7 +285,7 @@
MACHINE_START(STAMP9G20, "taskit Stamp9G20")
/* Maintainer: taskit GmbH */
- .init_time = at91sam926x_pit_init,
+ .init_time = at91_init_time,
.map_io = at91_map_io,
.handle_irq = at91_aic_handle_irq,
.init_early = stamp9g20_init_early,
diff --git a/arch/arm/mach-at91/generic.h b/arch/arm/mach-at91/generic.h
index 631fa3b..189d7c7 100644
--- a/arch/arm/mach-at91/generic.h
+++ b/arch/arm/mach-at91/generic.h
@@ -8,6 +8,9 @@
* published by the Free Software Foundation.
*/
+#ifndef _AT91_GENERIC_H
+#define _AT91_GENERIC_H
+
#include <linux/clkdev.h>
#include <linux/of.h>
#include <linux/reboot.h>
@@ -37,12 +40,15 @@
extern void __init at91_sysirq_mask_rtc(u32 rtc_base);
extern void __init at91_sysirq_mask_rtt(u32 rtt_base);
+ /* Devices */
+extern void __init at91_register_devices(void);
/* Timer */
+extern void __init at91_init_time(void);
extern void at91rm9200_ioremap_st(u32 addr);
extern void at91rm9200_timer_init(void);
extern void at91sam926x_ioremap_pit(u32 addr);
-extern void at91sam926x_pit_init(void);
+extern void at91sam926x_pit_init(int irq);
extern void at91x40_timer_init(void);
/* Clocks */
@@ -62,14 +68,6 @@
/* idle */
extern void at91sam9_idle(void);
-/* reset */
-extern void at91_ioremap_rstc(u32 base_addr);
-extern void at91sam9_alt_restart(enum reboot_mode, const char *);
-extern void at91sam9g45_restart(enum reboot_mode, const char *);
-
-/* shutdown */
-extern void at91_ioremap_shdwc(u32 base_addr);
-
/* Matrix */
extern void at91_ioremap_matrix(u32 base_addr);
@@ -90,3 +88,5 @@
struct device_node *parent);
extern u32 at91_get_extern_irq(void);
+
+#endif /* _AT91_GENERIC_H */
diff --git a/arch/arm/mach-at91/irq.c b/arch/arm/mach-at91/irq.c
index 3d192c5..cdb3ec9 100644
--- a/arch/arm/mach-at91/irq.c
+++ b/arch/arm/mach-at91/irq.c
@@ -48,11 +48,6 @@
static struct irq_domain *at91_aic_domain;
static struct device_node *at91_aic_np;
static unsigned int n_irqs = NR_AIC_IRQS;
-static unsigned long at91_aic_caps = 0;
-
-/* AIC5 introduces a Source Select Register */
-#define AT91_AIC_CAP_AIC5 (1 << 0)
-#define has_aic5() (at91_aic_caps & AT91_AIC_CAP_AIC5)
#ifdef CONFIG_PM
@@ -92,50 +87,14 @@
void at91_irq_suspend(void)
{
- int bit = -1;
-
- if (has_aic5()) {
- /* disable enabled irqs */
- while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) {
- at91_aic_write(AT91_AIC5_SSR,
- bit & AT91_AIC5_INTSEL_MSK);
- at91_aic_write(AT91_AIC5_IDCR, 1);
- }
- /* enable wakeup irqs */
- bit = -1;
- while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) {
- at91_aic_write(AT91_AIC5_SSR,
- bit & AT91_AIC5_INTSEL_MSK);
- at91_aic_write(AT91_AIC5_IECR, 1);
- }
- } else {
- at91_aic_write(AT91_AIC_IDCR, *backups);
- at91_aic_write(AT91_AIC_IECR, *wakeups);
- }
+ at91_aic_write(AT91_AIC_IDCR, *backups);
+ at91_aic_write(AT91_AIC_IECR, *wakeups);
}
void at91_irq_resume(void)
{
- int bit = -1;
-
- if (has_aic5()) {
- /* disable wakeup irqs */
- while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) {
- at91_aic_write(AT91_AIC5_SSR,
- bit & AT91_AIC5_INTSEL_MSK);
- at91_aic_write(AT91_AIC5_IDCR, 1);
- }
- /* enable irqs disabled for suspend */
- bit = -1;
- while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) {
- at91_aic_write(AT91_AIC5_SSR,
- bit & AT91_AIC5_INTSEL_MSK);
- at91_aic_write(AT91_AIC5_IECR, 1);
- }
- } else {
- at91_aic_write(AT91_AIC_IDCR, *wakeups);
- at91_aic_write(AT91_AIC_IECR, *backups);
- }
+ at91_aic_write(AT91_AIC_IDCR, *wakeups);
+ at91_aic_write(AT91_AIC_IECR, *backups);
}
#else
@@ -169,21 +128,6 @@
handle_IRQ(irqnr, regs);
}
-asmlinkage void __exception_irq_entry
-at91_aic5_handle_irq(struct pt_regs *regs)
-{
- u32 irqnr;
- u32 irqstat;
-
- irqnr = at91_aic_read(AT91_AIC5_IVR);
- irqstat = at91_aic_read(AT91_AIC5_ISR);
-
- if (!irqstat)
- at91_aic_write(AT91_AIC5_EOICR, 0);
- else
- handle_IRQ(irqnr, regs);
-}
-
static void at91_aic_mask_irq(struct irq_data *d)
{
/* Disable interrupt on AIC */
@@ -192,15 +136,6 @@
clear_backup(d->hwirq);
}
-static void __maybe_unused at91_aic5_mask_irq(struct irq_data *d)
-{
- /* Disable interrupt on AIC5 */
- at91_aic_write(AT91_AIC5_SSR, d->hwirq & AT91_AIC5_INTSEL_MSK);
- at91_aic_write(AT91_AIC5_IDCR, 1);
- /* Update ISR cache */
- clear_backup(d->hwirq);
-}
-
static void at91_aic_unmask_irq(struct irq_data *d)
{
/* Enable interrupt on AIC */
@@ -209,15 +144,6 @@
set_backup(d->hwirq);
}
-static void __maybe_unused at91_aic5_unmask_irq(struct irq_data *d)
-{
- /* Enable interrupt on AIC5 */
- at91_aic_write(AT91_AIC5_SSR, d->hwirq & AT91_AIC5_INTSEL_MSK);
- at91_aic_write(AT91_AIC5_IECR, 1);
- /* Update ISR cache */
- set_backup(d->hwirq);
-}
-
static void at91_aic_eoi(struct irq_data *d)
{
/*
@@ -227,11 +153,6 @@
at91_aic_write(AT91_AIC_EOICR, 0);
}
-static void __maybe_unused at91_aic5_eoi(struct irq_data *d)
-{
- at91_aic_write(AT91_AIC5_EOICR, 0);
-}
-
static unsigned long *at91_extern_irq;
u32 at91_get_extern_irq(void)
@@ -282,16 +203,8 @@
if (srctype < 0)
return srctype;
- if (has_aic5()) {
- at91_aic_write(AT91_AIC5_SSR,
- d->hwirq & AT91_AIC5_INTSEL_MSK);
- smr = at91_aic_read(AT91_AIC5_SMR) & ~AT91_AIC_SRCTYPE;
- at91_aic_write(AT91_AIC5_SMR, smr | srctype);
- } else {
- smr = at91_aic_read(AT91_AIC_SMR(d->hwirq))
- & ~AT91_AIC_SRCTYPE;
- at91_aic_write(AT91_AIC_SMR(d->hwirq), smr | srctype);
- }
+ smr = at91_aic_read(AT91_AIC_SMR(d->hwirq)) & ~AT91_AIC_SRCTYPE;
+ at91_aic_write(AT91_AIC_SMR(d->hwirq), smr | srctype);
return 0;
}
@@ -331,177 +244,6 @@
at91_aic_write(AT91_AIC_ICCR, 0xFFFFFFFF);
}
-static void __init __maybe_unused at91_aic5_hw_init(unsigned int spu_vector)
-{
- int i;
-
- /*
- * Perform 8 End Of Interrupt Command to make sure AIC
- * will not Lock out nIRQ
- */
- for (i = 0; i < 8; i++)
- at91_aic_write(AT91_AIC5_EOICR, 0);
-
- /*
- * Spurious Interrupt ID in Spurious Vector Register.
- * When there is no current interrupt, the IRQ Vector Register
- * reads the value stored in AIC_SPU
- */
- at91_aic_write(AT91_AIC5_SPU, spu_vector);
-
- /* No debugging in AIC: Debug (Protect) Control Register */
- at91_aic_write(AT91_AIC5_DCR, 0);
-
- /* Disable and clear all interrupts initially */
- for (i = 0; i < n_irqs; i++) {
- at91_aic_write(AT91_AIC5_SSR, i & AT91_AIC5_INTSEL_MSK);
- at91_aic_write(AT91_AIC5_IDCR, 1);
- at91_aic_write(AT91_AIC5_ICCR, 1);
- }
-}
-
-#if defined(CONFIG_OF)
-static unsigned int *at91_aic_irq_priorities;
-
-static int at91_aic_irq_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- /* Put virq number in Source Vector Register */
- at91_aic_write(AT91_AIC_SVR(hw), virq);
-
- /* Active Low interrupt, with priority */
- at91_aic_write(AT91_AIC_SMR(hw),
- AT91_AIC_SRCTYPE_LOW | at91_aic_irq_priorities[hw]);
-
- irq_set_chip_and_handler(virq, &at91_aic_chip, handle_fasteoi_irq);
- set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);
-
- return 0;
-}
-
-static int at91_aic5_irq_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- at91_aic_write(AT91_AIC5_SSR, hw & AT91_AIC5_INTSEL_MSK);
-
- /* Put virq number in Source Vector Register */
- at91_aic_write(AT91_AIC5_SVR, virq);
-
- /* Active Low interrupt, with priority */
- at91_aic_write(AT91_AIC5_SMR,
- AT91_AIC_SRCTYPE_LOW | at91_aic_irq_priorities[hw]);
-
- irq_set_chip_and_handler(virq, &at91_aic_chip, handle_fasteoi_irq);
- set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);
-
- return 0;
-}
-
-static int at91_aic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq, unsigned int *out_type)
-{
- if (WARN_ON(intsize < 3))
- return -EINVAL;
- if (WARN_ON(intspec[0] >= n_irqs))
- return -EINVAL;
- if (WARN_ON((intspec[2] < AT91_AIC_IRQ_MIN_PRIORITY)
- || (intspec[2] > AT91_AIC_IRQ_MAX_PRIORITY)))
- return -EINVAL;
-
- *out_hwirq = intspec[0];
- *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
- at91_aic_irq_priorities[*out_hwirq] = intspec[2];
-
- return 0;
-}
-
-static struct irq_domain_ops at91_aic_irq_ops = {
- .map = at91_aic_irq_map,
- .xlate = at91_aic_irq_domain_xlate,
-};
-
-int __init at91_aic_of_common_init(struct device_node *node,
- struct device_node *parent)
-{
- struct property *prop;
- const __be32 *p;
- u32 val;
-
- at91_extern_irq = kzalloc(BITS_TO_LONGS(n_irqs)
- * sizeof(*at91_extern_irq), GFP_KERNEL);
- if (!at91_extern_irq)
- return -ENOMEM;
-
- if (at91_aic_pm_init()) {
- kfree(at91_extern_irq);
- return -ENOMEM;
- }
-
- at91_aic_irq_priorities = kzalloc(n_irqs
- * sizeof(*at91_aic_irq_priorities),
- GFP_KERNEL);
- if (!at91_aic_irq_priorities)
- return -ENOMEM;
-
- at91_aic_base = of_iomap(node, 0);
- at91_aic_np = node;
-
- at91_aic_domain = irq_domain_add_linear(at91_aic_np, n_irqs,
- &at91_aic_irq_ops, NULL);
- if (!at91_aic_domain)
- panic("Unable to add AIC irq domain (DT)\n");
-
- of_property_for_each_u32(node, "atmel,external-irqs", prop, p, val) {
- if (val >= n_irqs)
- pr_warn("AIC: external irq %d >= %d skip it\n",
- val, n_irqs);
- else
- set_bit(val, at91_extern_irq);
- }
-
- irq_set_default_host(at91_aic_domain);
-
- return 0;
-}
-
-int __init at91_aic_of_init(struct device_node *node,
- struct device_node *parent)
-{
- int err;
-
- err = at91_aic_of_common_init(node, parent);
- if (err)
- return err;
-
- at91_aic_hw_init(n_irqs);
-
- return 0;
-}
-
-int __init at91_aic5_of_init(struct device_node *node,
- struct device_node *parent)
-{
- int err;
-
- at91_aic_caps |= AT91_AIC_CAP_AIC5;
- n_irqs = NR_AIC5_IRQS;
- at91_aic_chip.irq_ack = at91_aic5_mask_irq;
- at91_aic_chip.irq_mask = at91_aic5_mask_irq;
- at91_aic_chip.irq_unmask = at91_aic5_unmask_irq;
- at91_aic_chip.irq_eoi = at91_aic5_eoi;
- at91_aic_irq_ops.map = at91_aic5_irq_map;
-
- err = at91_aic_of_common_init(node, parent);
- if (err)
- return err;
-
- at91_aic5_hw_init(n_irqs);
-
- return 0;
-}
-#endif
-
/*
* Initialize the AIC interrupt controller.
*/
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index e955545..4073ab7 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -34,79 +34,8 @@
#include "pm.h"
#include "gpio.h"
-/*
- * Show the reason for the previous system reset.
- */
-
-#include "at91_rstc.h"
-#include "at91_shdwc.h"
-
static void (*at91_pm_standby)(void);
-static void __init show_reset_status(void)
-{
- static char reset[] __initdata = "reset";
-
- static char general[] __initdata = "general";
- static char wakeup[] __initdata = "wakeup";
- static char watchdog[] __initdata = "watchdog";
- static char software[] __initdata = "software";
- static char user[] __initdata = "user";
- static char unknown[] __initdata = "unknown";
-
- static char signal[] __initdata = "signal";
- static char rtc[] __initdata = "rtc";
- static char rtt[] __initdata = "rtt";
- static char restore[] __initdata = "power-restored";
-
- char *reason, *r2 = reset;
- u32 reset_type, wake_type;
-
- if (!at91_shdwc_base || !at91_rstc_base)
- return;
-
- reset_type = at91_rstc_read(AT91_RSTC_SR) & AT91_RSTC_RSTTYP;
- wake_type = at91_shdwc_read(AT91_SHDW_SR);
-
- switch (reset_type) {
- case AT91_RSTC_RSTTYP_GENERAL:
- reason = general;
- break;
- case AT91_RSTC_RSTTYP_WAKEUP:
- /* board-specific code enabled the wakeup sources */
- reason = wakeup;
-
- /* "wakeup signal" */
- if (wake_type & AT91_SHDW_WAKEUP0)
- r2 = signal;
- else {
- r2 = reason;
- if (wake_type & AT91_SHDW_RTTWK) /* rtt wakeup */
- reason = rtt;
- else if (wake_type & AT91_SHDW_RTCWK) /* rtc wakeup */
- reason = rtc;
- else if (wake_type == 0) /* power-restored wakeup */
- reason = restore;
- else /* unknown wakeup */
- reason = unknown;
- }
- break;
- case AT91_RSTC_RSTTYP_WATCHDOG:
- reason = watchdog;
- break;
- case AT91_RSTC_RSTTYP_SOFTWARE:
- reason = software;
- break;
- case AT91_RSTC_RSTTYP_USER:
- reason = user;
- break;
- default:
- reason = unknown;
- break;
- }
- pr_info("AT91: Starting after %s %s\n", reason, r2);
-}
-
static int at91_pm_valid_state(suspend_state_t state)
{
switch (state) {
@@ -206,16 +135,19 @@
at91_pinctrl_gpio_suspend();
else
at91_gpio_suspend();
- at91_irq_suspend();
- pr_debug("AT91: PM - wake mask %08x, pm state %d\n",
- /* remember all the always-wake irqs */
- (at91_pmc_read(AT91_PMC_PCSR)
- | (1 << AT91_ID_FIQ)
- | (1 << AT91_ID_SYS)
- | (at91_get_extern_irq()))
- & at91_aic_read(AT91_AIC_IMR),
- state);
+ if (IS_ENABLED(CONFIG_OLD_IRQ_AT91) && at91_aic_base) {
+ at91_irq_suspend();
+
+ pr_debug("AT91: PM - wake mask %08x, pm state %d\n",
+ /* remember all the always-wake irqs */
+ (at91_pmc_read(AT91_PMC_PCSR)
+ | (1 << AT91_ID_FIQ)
+ | (1 << AT91_ID_SYS)
+ | (at91_get_extern_irq()))
+ & at91_aic_read(AT91_AIC_IMR),
+ state);
+ }
switch (state) {
/*
@@ -280,12 +212,17 @@
goto error;
}
- pr_debug("AT91: PM - wakeup %08x\n",
- at91_aic_read(AT91_AIC_IPR) & at91_aic_read(AT91_AIC_IMR));
+ if (IS_ENABLED(CONFIG_OLD_IRQ_AT91) && at91_aic_base)
+ pr_debug("AT91: PM - wakeup %08x\n",
+ at91_aic_read(AT91_AIC_IPR) &
+ at91_aic_read(AT91_AIC_IMR));
error:
target_state = PM_SUSPEND_ON;
- at91_irq_resume();
+
+ if (IS_ENABLED(CONFIG_OLD_IRQ_AT91) && at91_aic_base)
+ at91_irq_resume();
+
if (of_have_populated_dt())
at91_pinctrl_gpio_resume();
else
@@ -338,7 +275,6 @@
suspend_set_ops(&at91_pm_ops);
- show_reset_status();
return 0;
}
arch_initcall(at91_pm_init);
diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
index f7a07a5..51c22a2 100644
--- a/arch/arm/mach-at91/setup.c
+++ b/arch/arm/mach-at91/setup.c
@@ -5,6 +5,8 @@
* Under GPLv2
*/
+#define pr_fmt(fmt) "AT91: " fmt
+
#include <linux/module.h>
#include <linux/io.h>
#include <linux/mm.h>
@@ -20,7 +22,6 @@
#include <mach/cpu.h>
#include <mach/at91_dbgu.h>
-#include "at91_shdwc.h"
#include "soc.h"
#include "generic.h"
#include "pm.h"
@@ -37,7 +38,7 @@
else
at91_soc_initdata.subtype = AT91_SOC_RM9200_BGA;
- pr_info("AT91: filled in soc subtype: %s\n",
+ pr_info("filled in soc subtype: %s\n",
at91_get_soc_subtype(&at91_soc_initdata));
}
@@ -49,7 +50,8 @@
void __init at91_init_interrupts(unsigned int *priority)
{
/* Initialize the AIC interrupt controller */
- at91_aic_init(priority, at91_boot_soc.extern_irq);
+ if (IS_ENABLED(CONFIG_OLD_IRQ_AT91))
+ at91_aic_init(priority, at91_boot_soc.extern_irq);
/* Enable GPIO interrupts */
at91_gpio_irq_setup();
@@ -66,7 +68,7 @@
}
at91_ramc_base[id] = ioremap(addr, size);
if (!at91_ramc_base[id])
- panic("Impossible to ioremap ramc.%d 0x%x\n", id, addr);
+ panic(pr_fmt("Impossible to ioremap ramc.%d 0x%x\n"), id, addr);
}
static struct map_desc sram_desc[2] __initdata;
@@ -83,7 +85,7 @@
desc->length = length;
desc->type = MT_MEMORY_RWX_NONCACHED;
- pr_info("AT91: sram at 0x%lx of 0x%x mapped at 0x%lx\n",
+ pr_info("sram at 0x%lx of 0x%x mapped at 0x%lx\n",
base, length, desc->virtual);
iotable_init(desc, 1);
@@ -302,45 +304,21 @@
soc_detect(AT91_BASE_DBGU1);
if (!at91_soc_is_detected())
- panic("AT91: Impossible to detect the SOC type");
+ panic(pr_fmt("Impossible to detect the SOC type"));
- pr_info("AT91: Detected soc type: %s\n",
+ pr_info("Detected soc type: %s\n",
at91_get_soc_type(&at91_soc_initdata));
if (at91_soc_initdata.subtype != AT91_SOC_SUBTYPE_NONE)
- pr_info("AT91: Detected soc subtype: %s\n",
+ pr_info("Detected soc subtype: %s\n",
at91_get_soc_subtype(&at91_soc_initdata));
if (!at91_soc_is_enabled())
- panic("AT91: Soc not enabled");
+ panic(pr_fmt("Soc not enabled"));
if (at91_boot_soc.map_io)
at91_boot_soc.map_io();
}
-void __iomem *at91_shdwc_base = NULL;
-
-static void at91sam9_poweroff(void)
-{
- at91_shdwc_write(AT91_SHDW_CR, AT91_SHDW_KEY | AT91_SHDW_SHDW);
-}
-
-void __init at91_ioremap_shdwc(u32 base_addr)
-{
- at91_shdwc_base = ioremap(base_addr, 16);
- if (!at91_shdwc_base)
- panic("Impossible to ioremap at91_shdwc_base\n");
- pm_power_off = at91sam9_poweroff;
-}
-
-void __iomem *at91_rstc_base;
-
-void __init at91_ioremap_rstc(u32 base_addr)
-{
- at91_rstc_base = ioremap(base_addr, 16);
- if (!at91_rstc_base)
- panic("Impossible to ioremap at91_rstc_base\n");
-}
-
void __iomem *at91_matrix_base;
EXPORT_SYMBOL_GPL(at91_matrix_base);
@@ -348,42 +326,15 @@
{
at91_matrix_base = ioremap(base_addr, 512);
if (!at91_matrix_base)
- panic("Impossible to ioremap at91_matrix_base\n");
+ panic(pr_fmt("Impossible to ioremap at91_matrix_base\n"));
}
#if defined(CONFIG_OF) && !defined(CONFIG_ARCH_AT91X40)
-static struct of_device_id rstc_ids[] = {
- { .compatible = "atmel,at91sam9260-rstc", .data = at91sam9_alt_restart },
- { .compatible = "atmel,at91sam9g45-rstc", .data = at91sam9g45_restart },
- { /*sentinel*/ }
-};
-
-static void at91_dt_rstc(void)
-{
- struct device_node *np;
- const struct of_device_id *of_id;
-
- np = of_find_matching_node(NULL, rstc_ids);
- if (!np)
- panic("unable to find compatible rstc node in dtb\n");
-
- at91_rstc_base = of_iomap(np, 0);
- if (!at91_rstc_base)
- panic("unable to map rstc cpu registers\n");
-
- of_id = of_match_node(rstc_ids, np);
- if (!of_id)
- panic("AT91: rtsc no restart function available\n");
-
- arm_pm_restart = of_id->data;
-
- of_node_put(np);
-}
-
static struct of_device_id ramc_ids[] = {
{ .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
{ .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
{ .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
+ { .compatible = "atmel,sama5d3-ddramc", .data = at91_ddr_standby },
{ /*sentinel*/ }
};
@@ -391,100 +342,29 @@
{
struct device_node *np;
const struct of_device_id *of_id;
+ int idx = 0;
+ const void *standby = NULL;
- np = of_find_matching_node(NULL, ramc_ids);
- if (!np)
- panic("unable to find compatible ram controller node in dtb\n");
+ for_each_matching_node_and_match(np, ramc_ids, &of_id) {
+ at91_ramc_base[idx] = of_iomap(np, 0);
+ if (!at91_ramc_base[idx])
+ panic(pr_fmt("unable to map ramc[%d] cpu registers\n"), idx);
- at91_ramc_base[0] = of_iomap(np, 0);
- if (!at91_ramc_base[0])
- panic("unable to map ramc[0] cpu registers\n");
- /* the controller may have 2 banks */
- at91_ramc_base[1] = of_iomap(np, 1);
+ if (!standby)
+ standby = of_id->data;
- of_id = of_match_node(ramc_ids, np);
- if (!of_id)
- pr_warn("AT91: ramc no standby function available\n");
- else
- at91_pm_set_standby(of_id->data);
+ idx++;
+ }
- of_node_put(np);
-}
+ if (!idx)
+ panic(pr_fmt("unable to find compatible ram controller node in dtb\n"));
-static struct of_device_id shdwc_ids[] = {
- { .compatible = "atmel,at91sam9260-shdwc", },
- { .compatible = "atmel,at91sam9rl-shdwc", },
- { .compatible = "atmel,at91sam9x5-shdwc", },
- { /*sentinel*/ }
-};
-
-static const char *shdwc_wakeup_modes[] = {
- [AT91_SHDW_WKMODE0_NONE] = "none",
- [AT91_SHDW_WKMODE0_HIGH] = "high",
- [AT91_SHDW_WKMODE0_LOW] = "low",
- [AT91_SHDW_WKMODE0_ANYLEVEL] = "any",
-};
-
-const int at91_dtget_shdwc_wakeup_mode(struct device_node *np)
-{
- const char *pm;
- int err, i;
-
- err = of_property_read_string(np, "atmel,wakeup-mode", &pm);
- if (err < 0)
- return AT91_SHDW_WKMODE0_ANYLEVEL;
-
- for (i = 0; i < ARRAY_SIZE(shdwc_wakeup_modes); i++)
- if (!strcasecmp(pm, shdwc_wakeup_modes[i]))
- return i;
-
- return -ENODEV;
-}
-
-static void at91_dt_shdwc(void)
-{
- struct device_node *np;
- int wakeup_mode;
- u32 reg;
- u32 mode = 0;
-
- np = of_find_matching_node(NULL, shdwc_ids);
- if (!np) {
- pr_debug("AT91: unable to find compatible shutdown (shdwc) controller node in dtb\n");
+ if (!standby) {
+ pr_warn("ramc no standby function available\n");
return;
}
- at91_shdwc_base = of_iomap(np, 0);
- if (!at91_shdwc_base)
- panic("AT91: unable to map shdwc cpu registers\n");
-
- wakeup_mode = at91_dtget_shdwc_wakeup_mode(np);
- if (wakeup_mode < 0) {
- pr_warn("AT91: shdwc unknown wakeup mode\n");
- goto end;
- }
-
- if (!of_property_read_u32(np, "atmel,wakeup-counter", &reg)) {
- if (reg > AT91_SHDW_CPTWK0_MAX) {
- pr_warn("AT91: shdwc wakeup counter 0x%x > 0x%x reduce it to 0x%x\n",
- reg, AT91_SHDW_CPTWK0_MAX, AT91_SHDW_CPTWK0_MAX);
- reg = AT91_SHDW_CPTWK0_MAX;
- }
- mode |= AT91_SHDW_CPTWK0_(reg);
- }
-
- if (of_property_read_bool(np, "atmel,wakeup-rtc-timer"))
- mode |= AT91_SHDW_RTCWKEN;
-
- if (of_property_read_bool(np, "atmel,wakeup-rtt-timer"))
- mode |= AT91_SHDW_RTTWKEN;
-
- at91_shdwc_write(AT91_SHDW_MR, wakeup_mode | mode);
-
-end:
- pm_power_off = at91sam9_poweroff;
-
- of_node_put(np);
+ at91_pm_set_standby(standby);
}
void __init at91rm9200_dt_initialize(void)
@@ -503,9 +383,7 @@
void __init at91_dt_initialize(void)
{
- at91_dt_rstc();
at91_dt_ramc();
- at91_dt_shdwc();
/* Init clock subsystem */
at91_dt_clock_init();
@@ -533,3 +411,13 @@
pinctrl_provide_dummies();
}
+
+void __init at91_register_devices(void)
+{
+ at91_boot_soc.register_devices();
+}
+
+void __init at91_init_time(void)
+{
+ at91_boot_soc.init_time();
+}
diff --git a/arch/arm/mach-at91/soc.h b/arch/arm/mach-at91/soc.h
index a1e1482..2886b83 100644
--- a/arch/arm/mach-at91/soc.h
+++ b/arch/arm/mach-at91/soc.h
@@ -11,7 +11,9 @@
void (*map_io)(void);
void (*ioremap_registers)(void);
void (*register_clocks)(void);
+ void (*register_devices)(void);
void (*init)(void);
+ void (*init_time)(void);
};
extern struct at91_init_soc at91_boot_soc;
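
The two hooks added to struct at91_init_soc above are what the board-file churn earlier in this diff plugs into; a minimal sketch of the resulting pattern for a converted legacy board follows (the board and function names are illustrative, not taken from the patch):

/* Illustrative only -- not part of the patch. */
static void __init example_board_init(void)
{
	/* SoC devices are now registered explicitly by the board */
	at91_register_devices();

	/* DBGU on ttyS0 (Rx & Tx only) */
	at91_register_uart(0, 0, 0);
	at91_add_device_serial();
}

MACHINE_START(EXAMPLE_BOARD, "Illustrative AT91 board")
	/* dispatches to at91_boot_soc.init_time(), e.g. the PIT setup */
	.init_time	= at91_init_time,
	.map_io		= at91_map_io,
	.handle_irq	= at91_aic_handle_irq,
	.init_machine	= example_board_init,
MACHINE_END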
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 622fa26..1a693d3 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -148,6 +148,8 @@
source "drivers/rpmsg/Kconfig"
+source "drivers/soc/Kconfig"
+
source "drivers/devfreq/Kconfig"
source "drivers/extcon/Kconfig"
diff --git a/drivers/clk/at91/clk-system.c b/drivers/clk/at91/clk-system.c
index 8c96307..a76d03f 100644
--- a/drivers/clk/at91/clk-system.c
+++ b/drivers/clk/at91/clk-system.c
@@ -119,13 +119,7 @@
init.ops = &system_ops;
init.parent_names = &parent_name;
init.num_parents = 1;
- /*
- * CLK_IGNORE_UNUSED is used to avoid ddrck switch off.
- * TODO : we should implement a driver supporting at91 ddr controller
- * (see drivers/memory) which would request and enable the ddrck clock.
- * When this is done we will be able to remove CLK_IGNORE_UNUSED flag.
- */
- init.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED;
+ init.flags = CLK_SET_RATE_PARENT;
sys->id = id;
sys->hw.init = &init;
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index cfd6519..82a2ebe 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -120,6 +120,10 @@
help
Use ARM global timer clock source as sched_clock
+config ATMEL_PIT
+ select CLKSRC_OF if OF
+ def_bool SOC_AT91SAM9 || SOC_SAMA5
+
config CLKSRC_METAG_GENERIC
def_bool y if METAG
help
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 7fd9fd1..e566f6c 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_CLKSRC_OF) += clksrc-of.o
+obj-$(CONFIG_ATMEL_PIT) += timer-atmel-pit.o
obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o
diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c
new file mode 100644
index 0000000..d528909
--- /dev/null
+++ b/drivers/clocksource/timer-atmel-pit.c
@@ -0,0 +1,296 @@
+/*
+ * at91sam926x_time.c - Periodic Interval Timer (PIT) for at91sam926x
+ *
+ * Copyright (C) 2005-2006 M. Amine SAYA, ATMEL Rousset, France
+ * Revision 2005 M. Nicolas Diremdjian, ATMEL Rousset, France
+ * Converted to ClockSource/ClockEvents by David Brownell.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "AT91: PIT: " fmt
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+
+#define AT91_PIT_MR 0x00 /* Mode Register */
+#define AT91_PIT_PITIEN BIT(25) /* Timer Interrupt Enable */
+#define AT91_PIT_PITEN BIT(24) /* Timer Enabled */
+#define AT91_PIT_PIV GENMASK(19, 0) /* Periodic Interval Value */
+
+#define AT91_PIT_SR 0x04 /* Status Register */
+#define AT91_PIT_PITS BIT(0) /* Timer Status */
+
+#define AT91_PIT_PIVR 0x08 /* Periodic Interval Value Register */
+#define AT91_PIT_PIIR 0x0c /* Periodic Interval Image Register */
+#define AT91_PIT_PICNT GENMASK(31, 20) /* Interval Counter */
+#define AT91_PIT_CPIV GENMASK(19, 0) /* Interval Value */
+
+#define PIT_CPIV(x) ((x) & AT91_PIT_CPIV)
+#define PIT_PICNT(x) (((x) & AT91_PIT_PICNT) >> 20)
+
+struct pit_data {
+ struct clock_event_device clkevt;
+ struct clocksource clksrc;
+
+ void __iomem *base;
+ u32 cycle;
+ u32 cnt;
+ unsigned int irq;
+ struct clk *mck;
+};
+
+static inline struct pit_data *clksrc_to_pit_data(struct clocksource *clksrc)
+{
+ return container_of(clksrc, struct pit_data, clksrc);
+}
+
+static inline struct pit_data *clkevt_to_pit_data(struct clock_event_device *clkevt)
+{
+ return container_of(clkevt, struct pit_data, clkevt);
+}
+
+static inline unsigned int pit_read(void __iomem *base, unsigned int reg_offset)
+{
+ return __raw_readl(base + reg_offset);
+}
+
+static inline void pit_write(void __iomem *base, unsigned int reg_offset, unsigned long value)
+{
+ __raw_writel(value, base + reg_offset);
+}
+
+/*
+ * Clocksource: just a monotonic counter of MCK/16 cycles.
+ * We don't care whether or not PIT irqs are enabled.
+ */
+static cycle_t read_pit_clk(struct clocksource *cs)
+{
+ struct pit_data *data = clksrc_to_pit_data(cs);
+ unsigned long flags;
+ u32 elapsed;
+ u32 t;
+
+ raw_local_irq_save(flags);
+ elapsed = data->cnt;
+ t = pit_read(data->base, AT91_PIT_PIIR);
+ raw_local_irq_restore(flags);
+
+ elapsed += PIT_PICNT(t) * data->cycle;
+ elapsed += PIT_CPIV(t);
+ return elapsed;
+}
+
+/*
+ * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16)
+ */
+static void
+pit_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev)
+{
+ struct pit_data *data = clkevt_to_pit_data(dev);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ /* update clocksource counter */
+ data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR));
+ pit_write(data->base, AT91_PIT_MR,
+ (data->cycle - 1) | AT91_PIT_PITEN | AT91_PIT_PITIEN);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ BUG();
+ /* FALLTHROUGH */
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_UNUSED:
+ /* disable irq, leaving the clocksource active */
+ pit_write(data->base, AT91_PIT_MR,
+ (data->cycle - 1) | AT91_PIT_PITEN);
+ break;
+ case CLOCK_EVT_MODE_RESUME:
+ break;
+ }
+}
+
+static void at91sam926x_pit_suspend(struct clock_event_device *cedev)
+{
+ struct pit_data *data = clkevt_to_pit_data(cedev);
+
+ /* Disable timer */
+ pit_write(data->base, AT91_PIT_MR, 0);
+}
+
+static void at91sam926x_pit_reset(struct pit_data *data)
+{
+ /* Disable timer and irqs */
+ pit_write(data->base, AT91_PIT_MR, 0);
+
+ /* Clear any pending interrupts, wait for PIT to stop counting */
+ while (PIT_CPIV(pit_read(data->base, AT91_PIT_PIVR)) != 0)
+ cpu_relax();
+
+ /* Start PIT but don't enable IRQ */
+ pit_write(data->base, AT91_PIT_MR,
+ (data->cycle - 1) | AT91_PIT_PITEN);
+}
+
+static void at91sam926x_pit_resume(struct clock_event_device *cedev)
+{
+ struct pit_data *data = clkevt_to_pit_data(cedev);
+
+ at91sam926x_pit_reset(data);
+}
+
+/*
+ * IRQ handler for the timer.
+ */
+static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id)
+{
+ struct pit_data *data = dev_id;
+
+ /*
+ * irqs should be disabled here, but as the irq is shared they are only
+ * guaranteed to be off if the timer irq is registered first.
+ */
+ WARN_ON_ONCE(!irqs_disabled());
+
+ /* The PIT interrupt may be disabled, and is shared */
+ if ((data->clkevt.mode == CLOCK_EVT_MODE_PERIODIC) &&
+ (pit_read(data->base, AT91_PIT_SR) & AT91_PIT_PITS)) {
+ unsigned nr_ticks;
+
+ /* Get number of ticks performed before irq, and ack it */
+ nr_ticks = PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR));
+ do {
+ data->cnt += data->cycle;
+ data->clkevt.event_handler(&data->clkevt);
+ nr_ticks--;
+ } while (nr_ticks);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+/*
+ * Set up both clocksource and clockevent support.
+ */
+static void __init at91sam926x_pit_common_init(struct pit_data *data)
+{
+ unsigned long pit_rate;
+ unsigned bits;
+ int ret;
+
+ /*
+ * Use our actual MCK to figure out how many MCK/16 ticks per
+ * 1/HZ period (instead of a compile-time constant LATCH).
+ */
+ pit_rate = clk_get_rate(data->mck) / 16;
+ data->cycle = DIV_ROUND_CLOSEST(pit_rate, HZ);
+ WARN_ON(((data->cycle - 1) & ~AT91_PIT_PIV) != 0);
+
+ /* Initialize and enable the timer */
+ at91sam926x_pit_reset(data);
+
+ /*
+ * Register clocksource. The high order bits of PIV are unused,
+ * so this isn't a 32-bit counter unless we get clockevent irqs.
+ */
+ bits = 12 /* PICNT */ + ilog2(data->cycle) /* PIV */;
+ data->clksrc.mask = CLOCKSOURCE_MASK(bits);
+ data->clksrc.name = "pit";
+ data->clksrc.rating = 175;
+ data->clksrc.read = read_pit_clk;
+ data->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ clocksource_register_hz(&data->clksrc, pit_rate);
+
+ /* Set up irq handler */
+ ret = request_irq(data->irq, at91sam926x_pit_interrupt,
+ IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
+ "at91_tick", data);
+ if (ret)
+ panic(pr_fmt("Unable to setup IRQ\n"));
+
+ /* Set up and register clockevents */
+ data->clkevt.name = "pit";
+ data->clkevt.features = CLOCK_EVT_FEAT_PERIODIC;
+ data->clkevt.shift = 32;
+ data->clkevt.mult = div_sc(pit_rate, NSEC_PER_SEC, data->clkevt.shift);
+ data->clkevt.rating = 100;
+ data->clkevt.cpumask = cpumask_of(0);
+
+ data->clkevt.set_mode = pit_clkevt_mode;
+ data->clkevt.resume = at91sam926x_pit_resume;
+ data->clkevt.suspend = at91sam926x_pit_suspend;
+ clockevents_register_device(&data->clkevt);
+}
+
+static void __init at91sam926x_pit_dt_init(struct device_node *node)
+{
+ struct pit_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ panic(pr_fmt("Unable to allocate memory\n"));
+
+ data->base = of_iomap(node, 0);
+ if (!data->base)
+ panic(pr_fmt("Could not map PIT address\n"));
+
+ data->mck = of_clk_get(node, 0);
+ if (IS_ERR(data->mck))
+ /* Fallback on clkdev for !CCF-based boards */
+ data->mck = clk_get(NULL, "mck");
+
+ if (IS_ERR(data->mck))
+ panic(pr_fmt("Unable to get mck clk\n"));
+
+ /* Get the interrupts property */
+ data->irq = irq_of_parse_and_map(node, 0);
+ if (!data->irq)
+ panic(pr_fmt("Unable to get IRQ from DT\n"));
+
+ at91sam926x_pit_common_init(data);
+}
+CLOCKSOURCE_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit",
+ at91sam926x_pit_dt_init);
+
+static void __iomem *pit_base_addr;
+
+void __init at91sam926x_pit_init(int irq)
+{
+ struct pit_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ panic(pr_fmt("Unable to allocate memory\n"));
+
+ data->base = pit_base_addr;
+
+ data->mck = clk_get(NULL, "mck");
+ if (IS_ERR(data->mck))
+ panic(pr_fmt("Unable to get mck clk\n"));
+
+ data->irq = irq;
+
+ at91sam926x_pit_common_init(data);
+}
+
+void __init at91sam926x_ioremap_pit(u32 addr)
+{
+ if (of_have_populated_dt())
+ return;
+
+ pit_base_addr = ioremap(addr, 16);
+
+ if (!pit_base_addr)
+ panic(pr_fmt("Impossible to ioremap PIT\n"));
+}
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index fd89ca9..7072c28 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -376,4 +376,13 @@
Support for error detection and correction on the
Cavium Octeon family of SOCs.
+config EDAC_ALTERA_MC
+ tristate "Altera SDRAM Memory Controller EDAC"
+ depends on EDAC_MM_EDAC && ARCH_SOCFPGA
+ help
+ Support for error detection and correction on the
+ Altera SDRAM memory controller. Note that the
+ preloader must initialize the SDRAM before loading
+ the kernel.
+
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index c479a24..359aa49 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -65,3 +65,5 @@
obj-$(CONFIG_EDAC_OCTEON_L2C) += octeon_edac-l2c.o
obj-$(CONFIG_EDAC_OCTEON_LMC) += octeon_edac-lmc.o
obj-$(CONFIG_EDAC_OCTEON_PCI) += octeon_edac-pci.o
+
+obj-$(CONFIG_EDAC_ALTERA_MC) += altera_edac.o
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
new file mode 100644
index 0000000..3c4929f
--- /dev/null
+++ b/drivers/edac/altera_edac.c
@@ -0,0 +1,410 @@
+/*
+ * Copyright Altera Corporation (C) 2014. All rights reserved.
+ * Copyright 2011-2012 Calxeda, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Adapted from the highbank_mc_edac driver.
+ */
+
+#include <linux/ctype.h>
+#include <linux/edac.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+#define EDAC_MOD_STR "altera_edac"
+#define EDAC_VERSION "1"
+
+/* SDRAM Controller CtrlCfg Register */
+#define CTLCFG_OFST 0x00
+
+/* SDRAM Controller CtrlCfg Register Bit Masks */
+#define CTLCFG_ECC_EN 0x400
+#define CTLCFG_ECC_CORR_EN 0x800
+#define CTLCFG_GEN_SB_ERR 0x2000
+#define CTLCFG_GEN_DB_ERR 0x4000
+
+#define CTLCFG_ECC_AUTO_EN (CTLCFG_ECC_EN | \
+ CTLCFG_ECC_CORR_EN)
+
+/* SDRAM Controller Address Width Register */
+#define DRAMADDRW_OFST 0x2C
+
+/* SDRAM Controller Address Widths Field Register */
+#define DRAMADDRW_COLBIT_MASK 0x001F
+#define DRAMADDRW_COLBIT_SHIFT 0
+#define DRAMADDRW_ROWBIT_MASK 0x03E0
+#define DRAMADDRW_ROWBIT_SHIFT 5
+#define DRAMADDRW_BANKBIT_MASK 0x1C00
+#define DRAMADDRW_BANKBIT_SHIFT 10
+#define DRAMADDRW_CSBIT_MASK 0xE000
+#define DRAMADDRW_CSBIT_SHIFT 13
+
+/* SDRAM Controller Interface Data Width Register */
+#define DRAMIFWIDTH_OFST 0x30
+
+/* SDRAM Controller Interface Data Width Defines */
+#define DRAMIFWIDTH_16B_ECC 24
+#define DRAMIFWIDTH_32B_ECC 40
+
+/* SDRAM Controller DRAM Status Register */
+#define DRAMSTS_OFST 0x38
+
+/* SDRAM Controller DRAM Status Register Bit Masks */
+#define DRAMSTS_SBEERR 0x04
+#define DRAMSTS_DBEERR 0x08
+#define DRAMSTS_CORR_DROP 0x10
+
+/* SDRAM Controller DRAM IRQ Register */
+#define DRAMINTR_OFST 0x3C
+
+/* SDRAM Controller DRAM IRQ Register Bit Masks */
+#define DRAMINTR_INTREN 0x01
+#define DRAMINTR_SBEMASK 0x02
+#define DRAMINTR_DBEMASK 0x04
+#define DRAMINTR_CORRDROPMASK 0x08
+#define DRAMINTR_INTRCLR 0x10
+
+/* SDRAM Controller Single Bit Error Count Register */
+#define SBECOUNT_OFST 0x40
+
+/* SDRAM Controller Single Bit Error Count Register Bit Masks */
+#define SBECOUNT_MASK 0x0F
+
+/* SDRAM Controller Double Bit Error Count Register */
+#define DBECOUNT_OFST 0x44
+
+/* SDRAM Controller Double Bit Error Count Register Bit Masks */
+#define DBECOUNT_MASK 0x0F
+
+/* SDRAM Controller ECC Error Address Register */
+#define ERRADDR_OFST 0x48
+
+/* SDRAM Controller ECC Error Address Register Bit Masks */
+#define ERRADDR_MASK 0xFFFFFFFF
+
+/* Altera SDRAM Memory Controller data */
+struct altr_sdram_mc_data {
+ struct regmap *mc_vbase;
+};
+
+static irqreturn_t altr_sdram_mc_err_handler(int irq, void *dev_id)
+{
+ struct mem_ctl_info *mci = dev_id;
+ struct altr_sdram_mc_data *drvdata = mci->pvt_info;
+ u32 status, err_count, err_addr;
+
+ /* Error Address is shared by both SBE & DBE */
+ regmap_read(drvdata->mc_vbase, ERRADDR_OFST, &err_addr);
+
+ regmap_read(drvdata->mc_vbase, DRAMSTS_OFST, &status);
+
+ if (status & DRAMSTS_DBEERR) {
+ regmap_read(drvdata->mc_vbase, DBECOUNT_OFST, &err_count);
+ panic("\nEDAC: [%d Uncorrectable errors @ 0x%08X]\n",
+ err_count, err_addr);
+ }
+ if (status & DRAMSTS_SBEERR) {
+ regmap_read(drvdata->mc_vbase, SBECOUNT_OFST, &err_count);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, err_count,
+ err_addr >> PAGE_SHIFT,
+ err_addr & ~PAGE_MASK, 0,
+ 0, 0, -1, mci->ctl_name, "");
+ }
+
+ regmap_write(drvdata->mc_vbase, DRAMINTR_OFST,
+ (DRAMINTR_INTRCLR | DRAMINTR_INTREN));
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_EDAC_DEBUG
+static ssize_t altr_sdr_mc_err_inject_write(struct file *file,
+ const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct mem_ctl_info *mci = file->private_data;
+ struct altr_sdram_mc_data *drvdata = mci->pvt_info;
+ u32 *ptemp;
+ dma_addr_t dma_handle;
+ u32 reg, read_reg;
+
+ ptemp = dma_alloc_coherent(mci->pdev, 16, &dma_handle, GFP_KERNEL);
+ if (!ptemp) {
+ dma_free_coherent(mci->pdev, 16, ptemp, dma_handle);
+ edac_printk(KERN_ERR, EDAC_MC,
+ "Inject: Buffer Allocation error\n");
+ return -ENOMEM;
+ }
+
+ regmap_read(drvdata->mc_vbase, CTLCFG_OFST, &read_reg);
+ read_reg &= ~(CTLCFG_GEN_SB_ERR | CTLCFG_GEN_DB_ERR);
+
+ /* Errors are injected by writing a word while the SBE or DBE
+ * bit in the CTLCFG register is set. Reading the word will
+ * trigger the SBE or DBE error and the corresponding IRQ.
+ */
+ if (count == 3) {
+ edac_printk(KERN_ALERT, EDAC_MC,
+ "Inject Double bit error\n");
+ regmap_write(drvdata->mc_vbase, CTLCFG_OFST,
+ (read_reg | CTLCFG_GEN_DB_ERR));
+ } else {
+ edac_printk(KERN_ALERT, EDAC_MC,
+ "Inject Single bit error\n");
+ regmap_write(drvdata->mc_vbase, CTLCFG_OFST,
+ (read_reg | CTLCFG_GEN_SB_ERR));
+ }
+
+ ptemp[0] = 0x5A5A5A5A;
+ ptemp[1] = 0xA5A5A5A5;
+
+ /* Clear the error injection bits */
+ regmap_write(drvdata->mc_vbase, CTLCFG_OFST, read_reg);
+ /* Ensure it has been written out */
+ wmb();
+
+ /*
+ * To trigger the error, we need to read the data back
+ * (the data was written with errors above).
+ * The ACCESS_ONCE macros and printk are used to prevent the
+ * compiler optimizing these reads out.
+ */
+ reg = ACCESS_ONCE(ptemp[0]);
+ read_reg = ACCESS_ONCE(ptemp[1]);
+ /* Force Read */
+ rmb();
+
+ edac_printk(KERN_ALERT, EDAC_MC, "Read Data [0x%X, 0x%X]\n",
+ reg, read_reg);
+
+ dma_free_coherent(mci->pdev, 16, ptemp, dma_handle);
+
+ return count;
+}
+
+static const struct file_operations altr_sdr_mc_debug_inject_fops = {
+ .open = simple_open,
+ .write = altr_sdr_mc_err_inject_write,
+ .llseek = generic_file_llseek,
+};
+
+static void altr_sdr_mc_create_debugfs_nodes(struct mem_ctl_info *mci)
+{
+ if (mci->debugfs)
+ debugfs_create_file("inject_ctrl", S_IWUSR, mci->debugfs, mci,
+ &altr_sdr_mc_debug_inject_fops);
+}
+#else
+static void altr_sdr_mc_create_debugfs_nodes(struct mem_ctl_info *mci)
+{}
+#endif
+
+/* Get total memory size in bytes */
+static u32 altr_sdram_get_total_mem_size(struct regmap *mc_vbase)
+{
+ u32 size, read_reg, row, bank, col, cs, width;
+
+ if (regmap_read(mc_vbase, DRAMADDRW_OFST, &read_reg) < 0)
+ return 0;
+
+ if (regmap_read(mc_vbase, DRAMIFWIDTH_OFST, &width) < 0)
+ return 0;
+
+ col = (read_reg & DRAMADDRW_COLBIT_MASK) >>
+ DRAMADDRW_COLBIT_SHIFT;
+ row = (read_reg & DRAMADDRW_ROWBIT_MASK) >>
+ DRAMADDRW_ROWBIT_SHIFT;
+ bank = (read_reg & DRAMADDRW_BANKBIT_MASK) >>
+ DRAMADDRW_BANKBIT_SHIFT;
+ cs = (read_reg & DRAMADDRW_CSBIT_MASK) >>
+ DRAMADDRW_CSBIT_SHIFT;
+
+ /* Correct for ECC as it's not addressable */
+ if (width == DRAMIFWIDTH_32B_ECC)
+ width = 32;
+ if (width == DRAMIFWIDTH_16B_ECC)
+ width = 16;
+
+ /* calculate the SDRAM size based on this info */
+ size = 1 << (row + bank + col);
+ size = size * cs * (width / 8);
+ return size;
+}
+
+static int altr_sdram_probe(struct platform_device *pdev)
+{
+ struct edac_mc_layer layers[2];
+ struct mem_ctl_info *mci;
+ struct altr_sdram_mc_data *drvdata;
+ struct regmap *mc_vbase;
+ struct dimm_info *dimm;
+ u32 read_reg, mem_size;
+ int irq;
+ int res = 0;
+
+ /* Validate the SDRAM controller has ECC enabled */
+ /* Grab the register range from the sdr controller in device tree */
+ mc_vbase = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "altr,sdr-syscon");
+ if (IS_ERR(mc_vbase)) {
+ edac_printk(KERN_ERR, EDAC_MC,
+ "regmap for altr,sdr-syscon lookup failed.\n");
+ return -ENODEV;
+ }
+
+ if (regmap_read(mc_vbase, CTLCFG_OFST, &read_reg) ||
+ ((read_reg & CTLCFG_ECC_AUTO_EN) != CTLCFG_ECC_AUTO_EN)) {
+ edac_printk(KERN_ERR, EDAC_MC,
+ "No ECC/ECC disabled [0x%08X]\n", read_reg);
+ return -ENODEV;
+ }
+
+ /* Grab memory size from device tree. */
+ mem_size = altr_sdram_get_total_mem_size(mc_vbase);
+ if (!mem_size) {
+ edac_printk(KERN_ERR, EDAC_MC,
+ "Unable to calculate memory size\n");
+ return -ENODEV;
+ }
+
+ /* Ensure the SDRAM Interrupt is disabled and cleared */
+ if (regmap_write(mc_vbase, DRAMINTR_OFST, DRAMINTR_INTRCLR)) {
+ edac_printk(KERN_ERR, EDAC_MC,
+ "Error clearing SDRAM ECC IRQ\n");
+ return -ENODEV;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ edac_printk(KERN_ERR, EDAC_MC,
+ "No irq %d in DT\n", irq);
+ return -ENODEV;
+ }
+
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = 1;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = 1;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+ sizeof(struct altr_sdram_mc_data));
+ if (!mci)
+ return -ENOMEM;
+
+ mci->pdev = &pdev->dev;
+ drvdata = mci->pvt_info;
+ drvdata->mc_vbase = mc_vbase;
+ platform_set_drvdata(pdev, mci);
+
+ if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
+ res = -ENOMEM;
+ goto free;
+ }
+
+ mci->mtype_cap = MEM_FLAG_DDR3;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_SECDED;
+ mci->mod_name = EDAC_MOD_STR;
+ mci->mod_ver = EDAC_VERSION;
+ mci->ctl_name = dev_name(&pdev->dev);
+ mci->scrub_mode = SCRUB_SW_SRC;
+ mci->dev_name = dev_name(&pdev->dev);
+
+ dimm = *mci->dimms;
+ dimm->nr_pages = ((mem_size - 1) >> PAGE_SHIFT) + 1;
+ dimm->grain = 8;
+ dimm->dtype = DEV_X8;
+ dimm->mtype = MEM_DDR3;
+ dimm->edac_mode = EDAC_SECDED;
+
+ res = edac_mc_add_mc(mci);
+ if (res < 0)
+ goto err;
+
+ res = devm_request_irq(&pdev->dev, irq, altr_sdram_mc_err_handler,
+ 0, dev_name(&pdev->dev), mci);
+ if (res < 0) {
+ edac_mc_printk(mci, KERN_ERR,
+ "Unable to request irq %d\n", irq);
+ res = -ENODEV;
+ goto err2;
+ }
+
+ if (regmap_write(drvdata->mc_vbase, DRAMINTR_OFST,
+ (DRAMINTR_INTRCLR | DRAMINTR_INTREN))) {
+ edac_mc_printk(mci, KERN_ERR,
+ "Error enabling SDRAM ECC IRQ\n");
+ res = -ENODEV;
+ goto err2;
+ }
+
+ altr_sdr_mc_create_debugfs_nodes(mci);
+
+ devres_close_group(&pdev->dev, NULL);
+
+ return 0;
+
+err2:
+ edac_mc_del_mc(&pdev->dev);
+err:
+ devres_release_group(&pdev->dev, NULL);
+free:
+ edac_mc_free(mci);
+ edac_printk(KERN_ERR, EDAC_MC,
+ "EDAC Probe Failed; Error %d\n", res);
+
+ return res;
+}
+
+static int altr_sdram_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+
+ edac_mc_del_mc(&pdev->dev);
+ edac_mc_free(mci);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id altr_sdram_ctrl_of_match[] = {
+ { .compatible = "altr,sdram-edac", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, altr_sdram_ctrl_of_match);
+
+static struct platform_driver altr_sdram_edac_driver = {
+ .probe = altr_sdram_probe,
+ .remove = altr_sdram_remove,
+ .driver = {
+ .name = "altr_sdram_edac",
+ .of_match_table = altr_sdram_ctrl_of_match,
+ },
+};
+
+module_platform_driver(altr_sdram_edac_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Thor Thayer");
+MODULE_DESCRIPTION("EDAC Driver for Altera SDRAM Controller");
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index a27e00e..bcc7ee1 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -31,6 +31,7 @@
#include <linux/err.h>
#include <linux/notifier.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/mailbox-omap.h>
@@ -94,6 +95,18 @@
struct list_head elem;
};
+struct omap_mbox_fifo_info {
+ int tx_id;
+ int tx_usr;
+ int tx_irq;
+
+ int rx_id;
+ int rx_usr;
+ int rx_irq;
+
+ const char *name;
+};
+
struct omap_mbox {
const char *name;
int irq;
@@ -587,24 +600,118 @@
return 0;
}
+static const struct of_device_id omap_mailbox_of_match[] = {
+ {
+ .compatible = "ti,omap2-mailbox",
+ .data = (void *)MBOX_INTR_CFG_TYPE1,
+ },
+ {
+ .compatible = "ti,omap3-mailbox",
+ .data = (void *)MBOX_INTR_CFG_TYPE1,
+ },
+ {
+ .compatible = "ti,omap4-mailbox",
+ .data = (void *)MBOX_INTR_CFG_TYPE2,
+ },
+ {
+ /* end */
+ },
+};
+MODULE_DEVICE_TABLE(of, omap_mailbox_of_match);
+
static int omap_mbox_probe(struct platform_device *pdev)
{
struct resource *mem;
int ret;
struct omap_mbox **list, *mbox, *mboxblk;
struct omap_mbox_pdata *pdata = pdev->dev.platform_data;
- struct omap_mbox_dev_info *info;
+ struct omap_mbox_dev_info *info = NULL;
+ struct omap_mbox_fifo_info *finfo, *finfoblk;
struct omap_mbox_device *mdev;
struct omap_mbox_fifo *fifo;
- u32 intr_type;
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *child;
+ const struct of_device_id *match;
+ u32 intr_type, info_count;
+ u32 num_users, num_fifos;
+ u32 tmp[3];
u32 l;
int i;
- if (!pdata || !pdata->info_cnt || !pdata->info) {
+ if (!node && (!pdata || !pdata->info_cnt || !pdata->info)) {
pr_err("%s: platform not supported\n", __func__);
return -ENODEV;
}
+ if (node) {
+ match = of_match_device(omap_mailbox_of_match, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+ intr_type = (u32)match->data;
+
+ if (of_property_read_u32(node, "ti,mbox-num-users",
+ &num_users))
+ return -ENODEV;
+
+ if (of_property_read_u32(node, "ti,mbox-num-fifos",
+ &num_fifos))
+ return -ENODEV;
+
+ info_count = of_get_available_child_count(node);
+ if (!info_count) {
+ dev_err(&pdev->dev, "no available mbox devices found\n");
+ return -ENODEV;
+ }
+ } else { /* non-DT device creation */
+ info_count = pdata->info_cnt;
+ info = pdata->info;
+ intr_type = pdata->intr_type;
+ num_users = pdata->num_users;
+ num_fifos = pdata->num_fifos;
+ }
+
+ finfoblk = devm_kzalloc(&pdev->dev, info_count * sizeof(*finfoblk),
+ GFP_KERNEL);
+ if (!finfoblk)
+ return -ENOMEM;
+
+ finfo = finfoblk;
+ child = NULL;
+ for (i = 0; i < info_count; i++, finfo++) {
+ if (node) {
+ child = of_get_next_available_child(node, child);
+ ret = of_property_read_u32_array(child, "ti,mbox-tx",
+ tmp, ARRAY_SIZE(tmp));
+ if (ret)
+ return ret;
+ finfo->tx_id = tmp[0];
+ finfo->tx_irq = tmp[1];
+ finfo->tx_usr = tmp[2];
+
+ ret = of_property_read_u32_array(child, "ti,mbox-rx",
+ tmp, ARRAY_SIZE(tmp));
+ if (ret)
+ return ret;
+ finfo->rx_id = tmp[0];
+ finfo->rx_irq = tmp[1];
+ finfo->rx_usr = tmp[2];
+
+ finfo->name = child->name;
+ } else {
+ finfo->tx_id = info->tx_id;
+ finfo->rx_id = info->rx_id;
+ finfo->tx_usr = info->usr_id;
+ finfo->tx_irq = info->irq_id;
+ finfo->rx_usr = info->usr_id;
+ finfo->rx_irq = info->irq_id;
+ finfo->name = info->name;
+ info++;
+ }
+ if (finfo->tx_id >= num_fifos || finfo->rx_id >= num_fifos ||
+ finfo->tx_usr >= num_users || finfo->rx_usr >= num_users)
+ return -EINVAL;
+ }
+
mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
if (!mdev)
return -ENOMEM;
@@ -615,41 +722,40 @@
return PTR_ERR(mdev->mbox_base);
/* allocate one extra for marking end of list */
- list = devm_kzalloc(&pdev->dev, (pdata->info_cnt + 1) * sizeof(*list),
+ list = devm_kzalloc(&pdev->dev, (info_count + 1) * sizeof(*list),
GFP_KERNEL);
if (!list)
return -ENOMEM;
- mboxblk = devm_kzalloc(&pdev->dev, pdata->info_cnt * sizeof(*mbox),
+ mboxblk = devm_kzalloc(&pdev->dev, info_count * sizeof(*mbox),
GFP_KERNEL);
if (!mboxblk)
return -ENOMEM;
- info = pdata->info;
- intr_type = pdata->intr_type;
mbox = mboxblk;
- for (i = 0; i < pdata->info_cnt; i++, info++) {
+ finfo = finfoblk;
+ for (i = 0; i < info_count; i++, finfo++) {
fifo = &mbox->tx_fifo;
- fifo->msg = MAILBOX_MESSAGE(info->tx_id);
- fifo->fifo_stat = MAILBOX_FIFOSTATUS(info->tx_id);
- fifo->intr_bit = MAILBOX_IRQ_NOTFULL(info->tx_id);
- fifo->irqenable = MAILBOX_IRQENABLE(intr_type, info->usr_id);
- fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, info->usr_id);
- fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, info->usr_id);
+ fifo->msg = MAILBOX_MESSAGE(finfo->tx_id);
+ fifo->fifo_stat = MAILBOX_FIFOSTATUS(finfo->tx_id);
+ fifo->intr_bit = MAILBOX_IRQ_NOTFULL(finfo->tx_id);
+ fifo->irqenable = MAILBOX_IRQENABLE(intr_type, finfo->tx_usr);
+ fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, finfo->tx_usr);
+ fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, finfo->tx_usr);
fifo = &mbox->rx_fifo;
- fifo->msg = MAILBOX_MESSAGE(info->rx_id);
- fifo->msg_stat = MAILBOX_MSGSTATUS(info->rx_id);
- fifo->intr_bit = MAILBOX_IRQ_NEWMSG(info->rx_id);
- fifo->irqenable = MAILBOX_IRQENABLE(intr_type, info->usr_id);
- fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, info->usr_id);
- fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, info->usr_id);
+ fifo->msg = MAILBOX_MESSAGE(finfo->rx_id);
+ fifo->msg_stat = MAILBOX_MSGSTATUS(finfo->rx_id);
+ fifo->intr_bit = MAILBOX_IRQ_NEWMSG(finfo->rx_id);
+ fifo->irqenable = MAILBOX_IRQENABLE(intr_type, finfo->rx_usr);
+ fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, finfo->rx_usr);
+ fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, finfo->rx_usr);
mbox->intr_type = intr_type;
mbox->parent = mdev;
- mbox->name = info->name;
- mbox->irq = platform_get_irq(pdev, info->irq_id);
+ mbox->name = finfo->name;
+ mbox->irq = platform_get_irq(pdev, finfo->tx_irq);
if (mbox->irq < 0)
return mbox->irq;
list[i] = mbox++;
@@ -657,8 +763,8 @@
mutex_init(&mdev->cfg_lock);
mdev->dev = &pdev->dev;
- mdev->num_users = pdata->num_users;
- mdev->num_fifos = pdata->num_fifos;
+ mdev->num_users = num_users;
+ mdev->num_fifos = num_fifos;
mdev->mboxes = list;
ret = omap_mbox_register(mdev);
if (ret)
@@ -684,6 +790,7 @@
if (ret < 0)
goto unregister;
+ devm_kfree(&pdev->dev, finfoblk);
return 0;
unregister:
@@ -708,6 +815,7 @@
.driver = {
.name = "omap-mailbox",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(omap_mailbox_of_match),
},
};
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index fab81a1..6d91c27 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -7,6 +7,16 @@
if MEMORY
+config ATMEL_SDRAMC
+ bool "Atmel (Multi-port DDR-)SDRAM Controller"
+ default y
+ depends on ARCH_AT91 && OF
+ help
+ This driver is for the Atmel SDRAM Controller or the Atmel Multi-port
+ DDR-SDRAM Controller available on Atmel AT91SAM9 and SAMA5 SoCs.
+ Starting with the at91sam9g45, this controller supports SDR, DDR and
+ LP-DDR memories.
+
config TI_AEMIF
tristate "Texas Instruments AEMIF driver"
depends on (ARCH_DAVINCI || ARCH_KEYSTONE) && OF
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index 4055c47..c32d319 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -5,6 +5,7 @@
ifeq ($(CONFIG_DDR),y)
obj-$(CONFIG_OF) += of_memory.o
endif
+obj-$(CONFIG_ATMEL_SDRAMC) += atmel-sdramc.o
obj-$(CONFIG_TI_AEMIF) += ti-aemif.o
obj-$(CONFIG_TI_EMIF) += emif.o
obj-$(CONFIG_FSL_CORENET_CF) += fsl-corenet-cf.o
diff --git a/drivers/memory/atmel-sdramc.c b/drivers/memory/atmel-sdramc.c
new file mode 100644
index 0000000..fed04e8
--- /dev/null
+++ b/drivers/memory/atmel-sdramc.c
@@ -0,0 +1,98 @@
+/*
+ * Atmel (Multi-port DDR-)SDRAM Controller driver
+ *
+ * Copyright (C) 2014 Atmel
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+struct at91_ramc_caps {
+ bool has_ddrck;
+ bool has_mpddr_clk;
+};
+
+static const struct at91_ramc_caps at91rm9200_caps = { };
+
+static const struct at91_ramc_caps at91sam9g45_caps = {
+ .has_ddrck = 1,
+ .has_mpddr_clk = 0,
+};
+
+static const struct at91_ramc_caps sama5d3_caps = {
+ .has_ddrck = 1,
+ .has_mpddr_clk = 1,
+};
+
+static const struct of_device_id atmel_ramc_of_match[] = {
+ { .compatible = "atmel,at91rm9200-sdramc", .data = &at91rm9200_caps, },
+ { .compatible = "atmel,at91sam9260-sdramc", .data = &at91rm9200_caps, },
+ { .compatible = "atmel,at91sam9g45-ddramc", .data = &at91sam9g45_caps, },
+ { .compatible = "atmel,sama5d3-ddramc", .data = &sama5d3_caps, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, atmel_ramc_of_match);
+
+static int atmel_ramc_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ const struct at91_ramc_caps *caps;
+ struct clk *clk;
+
+ match = of_match_device(atmel_ramc_of_match, &pdev->dev);
+ caps = match->data;
+
+ if (caps->has_ddrck) {
+ clk = devm_clk_get(&pdev->dev, "ddrck");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ clk_prepare_enable(clk);
+ }
+
+ if (caps->has_mpddr_clk) {
+ clk = devm_clk_get(&pdev->dev, "mpddr");
+ if (IS_ERR(clk)) {
+ pr_err("AT91 RAMC: couldn't get mpddr clock\n");
+ return PTR_ERR(clk);
+ }
+ clk_prepare_enable(clk);
+ }
+
+ return 0;
+}
+
+static struct platform_driver atmel_ramc_driver = {
+ .probe = atmel_ramc_probe,
+ .driver = {
+ .name = "atmel-ramc",
+ .owner = THIS_MODULE,
+ .of_match_table = atmel_ramc_of_match,
+ },
+};
+
+static int __init atmel_ramc_init(void)
+{
+ return platform_driver_register(&atmel_ramc_driver);
+}
+module_init(atmel_ramc_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Alexandre Belloni <alexandre.belloni@free-electrons.com>");
+MODULE_DESCRIPTION("Atmel (Multi-port DDR-)SDRAM Controller");
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index ca41523..81b4451 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -6,15 +6,33 @@
Say Y here to enable board reset and power off
+if POWER_RESET
+
config POWER_RESET_AS3722
bool "ams AS3722 power-off driver"
- depends on MFD_AS3722 && POWER_RESET
+ depends on MFD_AS3722
help
This driver supports turning off board via a ams AS3722 power-off.
+config POWER_RESET_AT91_POWEROFF
+ bool "Atmel AT91 poweroff driver"
+ depends on ARCH_AT91
+ default SOC_AT91SAM9 || SOC_SAMA5
+ help
+ This driver supports poweroff for Atmel AT91SAM9 and SAMA5
+ SoCs.
+
+config POWER_RESET_AT91_RESET
+ bool "Atmel AT91 reset driver"
+ depends on ARCH_AT91
+ default SOC_AT91SAM9 || SOC_SAMA5
+ help
+ This driver supports restart for Atmel AT91SAM9 and SAMA5
+ SoCs.
+
config POWER_RESET_AXXIA
bool "LSI Axxia reset driver"
- depends on POWER_RESET && ARCH_AXXIA
+ depends on ARCH_AXXIA
help
This driver supports restart for Axxia SoC.
@@ -33,7 +51,7 @@
config POWER_RESET_GPIO
bool "GPIO power-off driver"
- depends on OF_GPIO && POWER_RESET
+ depends on OF_GPIO
help
This driver supports turning off your board via a GPIO line.
If your board needs a GPIO high/low to power down, say Y and
@@ -47,13 +65,13 @@
config POWER_RESET_MSM
bool "Qualcomm MSM power-off driver"
- depends on POWER_RESET && ARCH_QCOM
+ depends on ARCH_QCOM
help
Power off and restart support for Qualcomm boards.
config POWER_RESET_QNAP
bool "QNAP power-off driver"
- depends on OF_GPIO && POWER_RESET && PLAT_ORION
+ depends on OF_GPIO && PLAT_ORION
help
This driver supports turning off QNAP NAS devices by sending
commands to the microcontroller which controls the main power.
@@ -71,14 +89,13 @@
config POWER_RESET_SUN6I
bool "Allwinner A31 SoC reset driver"
depends on ARCH_SUNXI
- depends on POWER_RESET
help
Reboot support for the Allwinner A31 SoCs.
config POWER_RESET_VEXPRESS
bool "ARM Versatile Express power-off and reset driver"
depends on ARM || ARM64
- depends on POWER_RESET && VEXPRESS_CONFIG
+ depends on VEXPRESS_CONFIG
help
Power off and reset support for the ARM Ltd. Versatile
Express boards.
@@ -86,7 +103,6 @@
config POWER_RESET_XGENE
bool "APM SoC X-Gene reset driver"
depends on ARM64
- depends on POWER_RESET
help
Reboot support for the APM SoC X-Gene Eval boards.
@@ -97,3 +113,4 @@
help
Reboot support for the KEYSTONE SoCs.
+endif
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index a42e70e..4433a75 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -1,4 +1,6 @@
obj-$(CONFIG_POWER_RESET_AS3722) += as3722-poweroff.o
+obj-$(CONFIG_POWER_RESET_AT91_POWEROFF) += at91-poweroff.o
+obj-$(CONFIG_POWER_RESET_AT91_RESET) += at91-reset.o
obj-$(CONFIG_POWER_RESET_AXXIA) += axxia-reset.o
obj-$(CONFIG_POWER_RESET_BRCMSTB) += brcmstb-reboot.o
obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
diff --git a/drivers/power/reset/at91-poweroff.c b/drivers/power/reset/at91-poweroff.c
new file mode 100644
index 0000000..c610003
--- /dev/null
+++ b/drivers/power/reset/at91-poweroff.c
@@ -0,0 +1,156 @@
+/*
+ * Atmel AT91 SAM9 SoCs poweroff code
+ *
+ * Copyright (C) 2007 Atmel Corporation.
+ * Copyright (C) 2011 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
+ * Copyright (C) 2014 Free Electrons
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+
+#define AT91_SHDW_CR 0x00 /* Shut Down Control Register */
+#define AT91_SHDW_SHDW BIT(0) /* Shut Down command */
+#define AT91_SHDW_KEY (0xa5 << 24) /* KEY Password */
+
+#define AT91_SHDW_MR 0x04 /* Shut Down Mode Register */
+#define AT91_SHDW_WKMODE0 GENMASK(2, 0) /* Wake-up 0 Mode Selection */
+#define AT91_SHDW_CPTWK0_MAX 0xf /* Maximum Counter On Wake Up 0 */
+#define AT91_SHDW_CPTWK0 (AT91_SHDW_CPTWK0_MAX << 4) /* Counter On Wake Up 0 */
+#define AT91_SHDW_CPTWK0_(x) ((x) << 4)
+#define AT91_SHDW_RTTWKEN BIT(16) /* Real Time Timer Wake-up Enable */
+#define AT91_SHDW_RTCWKEN BIT(17) /* Real Time Clock Wake-up Enable */
+
+#define AT91_SHDW_SR 0x08 /* Shut Down Status Register */
+#define AT91_SHDW_WAKEUP0 BIT(0) /* Wake-up 0 Status */
+#define AT91_SHDW_RTTWK BIT(16) /* Real-time Timer Wake-up */
+#define AT91_SHDW_RTCWK BIT(17) /* Real-time Clock Wake-up [SAM9RL] */
+
+enum wakeup_type {
+ AT91_SHDW_WKMODE0_NONE = 0,
+ AT91_SHDW_WKMODE0_HIGH = 1,
+ AT91_SHDW_WKMODE0_LOW = 2,
+ AT91_SHDW_WKMODE0_ANYLEVEL = 3,
+};
+
+static const char *shdwc_wakeup_modes[] = {
+ [AT91_SHDW_WKMODE0_NONE] = "none",
+ [AT91_SHDW_WKMODE0_HIGH] = "high",
+ [AT91_SHDW_WKMODE0_LOW] = "low",
+ [AT91_SHDW_WKMODE0_ANYLEVEL] = "any",
+};
+
+static void __iomem *at91_shdwc_base;
+
+static void __init at91_wakeup_status(void)
+{
+ u32 reg = readl(at91_shdwc_base + AT91_SHDW_SR);
+ char *reason = "unknown";
+
+ /* Simple power-on, just bail out */
+ if (!reg)
+ return;
+
+ if (reg & AT91_SHDW_RTTWK)
+ reason = "RTT";
+ else if (reg & AT91_SHDW_RTCWK)
+ reason = "RTC";
+
+ pr_info("AT91: Wake-Up source: %s\n", reason);
+}
+
+static void at91_poweroff(void)
+{
+ writel(AT91_SHDW_KEY | AT91_SHDW_SHDW, at91_shdwc_base + AT91_SHDW_CR);
+}
+
+static int at91_poweroff_get_wakeup_mode(struct device_node *np)
+{
+ const char *pm;
+ int err, i;
+
+ err = of_property_read_string(np, "atmel,wakeup-mode", &pm);
+ if (err < 0)
+ return AT91_SHDW_WKMODE0_ANYLEVEL;
+
+ for (i = 0; i < ARRAY_SIZE(shdwc_wakeup_modes); i++)
+ if (!strcasecmp(pm, shdwc_wakeup_modes[i]))
+ return i;
+
+ return -ENODEV;
+}
+
+static void at91_poweroff_dt_set_wakeup_mode(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int wakeup_mode;
+ u32 mode = 0, tmp;
+
+ wakeup_mode = at91_poweroff_get_wakeup_mode(np);
+ if (wakeup_mode < 0) {
+ dev_warn(&pdev->dev, "shdwc unknown wakeup mode\n");
+ return;
+ }
+
+ if (!of_property_read_u32(np, "atmel,wakeup-counter", &tmp)) {
+ if (tmp > AT91_SHDW_CPTWK0_MAX) {
+ dev_warn(&pdev->dev,
+ "shdwc wakeup counter 0x%x > 0x%x reduce it to 0x%x\n",
+ tmp, AT91_SHDW_CPTWK0_MAX, AT91_SHDW_CPTWK0_MAX);
+ tmp = AT91_SHDW_CPTWK0_MAX;
+ }
+ mode |= AT91_SHDW_CPTWK0_(tmp);
+ }
+
+ if (of_property_read_bool(np, "atmel,wakeup-rtc-timer"))
+ mode |= AT91_SHDW_RTCWKEN;
+
+ if (of_property_read_bool(np, "atmel,wakeup-rtt-timer"))
+ mode |= AT91_SHDW_RTTWKEN;
+
+ writel(wakeup_mode | mode, at91_shdwc_base + AT91_SHDW_MR);
+}
+
+static int at91_poweroff_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ at91_shdwc_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(at91_shdwc_base)) {
+ dev_err(&pdev->dev, "Could not map reset controller address\n");
+ return PTR_ERR(at91_shdwc_base);
+ }
+
+ at91_wakeup_status();
+
+ if (pdev->dev.of_node)
+ at91_poweroff_dt_set_wakeup_mode(pdev);
+
+ pm_power_off = at91_poweroff;
+
+ return 0;
+}
+
+static struct of_device_id at91_poweroff_of_match[] = {
+ { .compatible = "atmel,at91sam9260-shdwc", },
+ { .compatible = "atmel,at91sam9rl-shdwc", },
+ { .compatible = "atmel,at91sam9x5-shdwc", },
+ { /*sentinel*/ }
+};
+
+static struct platform_driver at91_poweroff_driver = {
+ .probe = at91_poweroff_probe,
+ .driver = {
+ .name = "at91-poweroff",
+ .of_match_table = at91_poweroff_of_match,
+ },
+};
+module_platform_driver(at91_poweroff_driver);
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
new file mode 100644
index 0000000..3611806
--- /dev/null
+++ b/drivers/power/reset/at91-reset.c
@@ -0,0 +1,252 @@
+/*
+ * Atmel AT91 SAM9 SoCs reset code
+ *
+ * Copyright (C) 2007 Atmel Corporation.
+ * Copyright (C) BitBox Ltd 2010
+ * Copyright (C) 2011 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
+ * Copyright (C) 2014 Free Electrons
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+
+#include <asm/system_misc.h>
+
+#include <mach/at91sam9_ddrsdr.h>
+#include <mach/at91sam9_sdramc.h>
+
+#define AT91_RSTC_CR 0x00 /* Reset Controller Control Register */
+#define AT91_RSTC_PROCRST BIT(0) /* Processor Reset */
+#define AT91_RSTC_PERRST BIT(2) /* Peripheral Reset */
+#define AT91_RSTC_EXTRST BIT(3) /* External Reset */
+#define AT91_RSTC_KEY (0xa5 << 24) /* KEY Password */
+
+#define AT91_RSTC_SR 0x04 /* Reset Controller Status Register */
+#define AT91_RSTC_URSTS BIT(0) /* User Reset Status */
+#define AT91_RSTC_RSTTYP GENMASK(10, 8) /* Reset Type */
+#define AT91_RSTC_NRSTL BIT(16) /* NRST Pin Level */
+#define AT91_RSTC_SRCMP BIT(17) /* Software Reset Command in Progress */
+
+#define AT91_RSTC_MR 0x08 /* Reset Controller Mode Register */
+#define AT91_RSTC_URSTEN BIT(0) /* User Reset Enable */
+#define AT91_RSTC_URSTIEN BIT(4) /* User Reset Interrupt Enable */
+#define AT91_RSTC_ERSTL GENMASK(11, 8) /* External Reset Length */
+
+enum reset_type {
+ RESET_TYPE_GENERAL = 0,
+ RESET_TYPE_WAKEUP = 1,
+ RESET_TYPE_WATCHDOG = 2,
+ RESET_TYPE_SOFTWARE = 3,
+ RESET_TYPE_USER = 4,
+};
+
+static void __iomem *at91_ramc_base[2], *at91_rstc_base;
+
+/*
+ * Unless the SDRAM is cleanly shut down before we hit the
+ * reset register, it can be left driving the data bus and
+ * killing the chance of a subsequent boot from NAND.
+ */
+static void at91sam9260_restart(enum reboot_mode mode, const char *cmd)
+{
+ asm volatile(
+ /* Align to cache lines */
+ ".balign 32\n\t"
+
+ /* Disable SDRAM accesses */
+ "str %2, [%0, #" __stringify(AT91_SDRAMC_TR) "]\n\t"
+
+ /* Power down SDRAM */
+ "str %3, [%0, #" __stringify(AT91_SDRAMC_LPR) "]\n\t"
+
+ /* Reset CPU */
+ "str %4, [%1, #" __stringify(AT91_RSTC_CR) "]\n\t"
+
+ "b .\n\t"
+ :
+ : "r" (at91_ramc_base[0]),
+ "r" (at91_rstc_base),
+ "r" (1),
+ "r" (AT91_SDRAMC_LPCB_POWER_DOWN),
+ "r" (AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST));
+}
+
+static void at91sam9g45_restart(enum reboot_mode mode, const char *cmd)
+{
+ asm volatile(
+ /*
+ * Test whether we have a second RAM controller to care
+ * about.
+ *
+ * First, test that we can dereference the virtual address.
+ */
+ "cmp %1, #0\n\t"
+ "beq 1f\n\t"
+
+ /* Then, test that the RAM controller is enabled */
+ "ldr r0, [%1]\n\t"
+ "cmp r0, #0\n\t"
+
+ /* Align to cache lines */
+ ".balign 32\n\t"
+
+ /* Disable SDRAM0 accesses */
+ "1: str %3, [%0, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t"
+ /* Power down SDRAM0 */
+ " str %4, [%0, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t"
+ /* Disable SDRAM1 accesses */
+ " strne %3, [%1, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t"
+ /* Power down SDRAM1 */
+ " strne %4, [%1, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t"
+ /* Reset CPU */
+ " str %5, [%2, #" __stringify(AT91_RSTC_CR) "]\n\t"
+
+ " b .\n\t"
+ :
+ : "r" (at91_ramc_base[0]),
+ "r" (at91_ramc_base[1]),
+ "r" (at91_rstc_base),
+ "r" (1),
+ "r" (AT91_DDRSDRC_LPCB_POWER_DOWN),
+ "r" (AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST)
+ : "r0");
+}
+
+static void __init at91_reset_status(struct platform_device *pdev)
+{
+ u32 reg = readl(at91_rstc_base + AT91_RSTC_SR);
+ char *reason;
+
+ switch ((reg & AT91_RSTC_RSTTYP) >> 8) {
+ case RESET_TYPE_GENERAL:
+ reason = "general reset";
+ break;
+ case RESET_TYPE_WAKEUP:
+ reason = "wakeup";
+ break;
+ case RESET_TYPE_WATCHDOG:
+ reason = "watchdog reset";
+ break;
+ case RESET_TYPE_SOFTWARE:
+ reason = "software reset";
+ break;
+ case RESET_TYPE_USER:
+ reason = "user reset";
+ break;
+ default:
+ reason = "unknown reset";
+ break;
+ }
+
+ pr_info("AT91: Starting after %s\n", reason);
+}
+
+static struct of_device_id at91_ramc_of_match[] = {
+ { .compatible = "atmel,at91sam9260-sdramc", },
+ { .compatible = "atmel,at91sam9g45-ddramc", },
+ { .compatible = "atmel,sama5d3-ddramc", },
+ { /* sentinel */ }
+};
+
+static struct of_device_id at91_reset_of_match[] = {
+ { .compatible = "atmel,at91sam9260-rstc", .data = at91sam9260_restart },
+ { .compatible = "atmel,at91sam9g45-rstc", .data = at91sam9g45_restart },
+ { /* sentinel */ }
+};
+
+static int at91_reset_of_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct device_node *np;
+ int idx = 0;
+
+ at91_rstc_base = of_iomap(pdev->dev.of_node, 0);
+ if (!at91_rstc_base) {
+ dev_err(&pdev->dev, "Could not map reset controller address\n");
+ return -ENODEV;
+ }
+
+ for_each_matching_node(np, at91_ramc_of_match) {
+ at91_ramc_base[idx] = of_iomap(np, 0);
+ if (!at91_ramc_base[idx]) {
+ dev_err(&pdev->dev, "Could not map ram controller address\n");
+ return -ENODEV;
+ }
+ idx++;
+ }
+
+ match = of_match_node(at91_reset_of_match, pdev->dev.of_node);
+ arm_pm_restart = match->data;
+
+ return 0;
+}
+
+static int at91_reset_platform_probe(struct platform_device *pdev)
+{
+ const struct platform_device_id *match;
+ struct resource *res;
+ int idx = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ at91_rstc_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(at91_rstc_base)) {
+ dev_err(&pdev->dev, "Could not map reset controller address\n");
+ return PTR_ERR(at91_rstc_base);
+ }
+
+ for (idx = 0; idx < 2; idx++) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, idx + 1);
+ at91_ramc_base[idx] = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!at91_ramc_base[idx]) {
+ dev_err(&pdev->dev, "Could not map ram controller address\n");
+ return -ENOMEM;
+ }
+ }
+
+ match = platform_get_device_id(pdev);
+ arm_pm_restart = (void (*)(enum reboot_mode, const char*))
+ match->driver_data;
+
+ return 0;
+}
+
+static int at91_reset_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ if (pdev->dev.of_node)
+ ret = at91_reset_of_probe(pdev);
+ else
+ ret = at91_reset_platform_probe(pdev);
+
+ if (ret)
+ return ret;
+
+ at91_reset_status(pdev);
+
+ return 0;
+}
+
+static struct platform_device_id at91_reset_plat_match[] = {
+ { "at91-sam9260-reset", (unsigned long)at91sam9260_restart },
+ { "at91-sam9g45-reset", (unsigned long)at91sam9g45_restart },
+ { /* sentinel */ }
+};
+
+static struct platform_driver at91_reset_driver = {
+ .probe = at91_reset_probe,
+ .driver = {
+ .name = "at91-reset",
+ .of_match_table = at91_reset_of_match,
+ },
+ .id_table = at91_reset_plat_match,
+};
+module_platform_driver(at91_reset_driver);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index a168e96..6b34bc9 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1175,9 +1175,16 @@
If you say Y here you will get support for the Hypervisor
based RTC on SUN4V systems.
+config RTC_DRV_SUN6I
+ tristate "Allwinner A31 RTC"
+ depends on MACH_SUN6I || MACH_SUN8I
+ help
+ If you say Y here you will get support for the RTC found on
+ Allwinner A31.
+
config RTC_DRV_SUNXI
tristate "Allwinner sun4i/sun7i RTC"
- depends on ARCH_SUNXI
+ depends on MACH_SUN4I || MACH_SUN7I
help
If you say Y here you will get support for the RTC found on
Allwinner A10/A20.
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 56f061c..9055b7d 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -128,6 +128,7 @@
obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o
obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o
obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o
+obj-$(CONFIG_RTC_DRV_SUN6I) += rtc-sun6i.o
obj-$(CONFIG_RTC_DRV_SUNXI) += rtc-sunxi.o
obj-$(CONFIG_RTC_DRV_TEGRA) += rtc-tegra.o
obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
new file mode 100644
index 0000000..c169a2c
--- /dev/null
+++ b/drivers/rtc/rtc-sun6i.c
@@ -0,0 +1,447 @@
+/*
+ * An RTC driver for Allwinner A31/A23
+ *
+ * Copyright (c) 2014, Chen-Yu Tsai <wens@csie.org>
+ *
+ * based on rtc-sunxi.c
+ *
+ * An RTC driver for Allwinner A10/A20
+ *
+ * Copyright (c) 2013, Carlo Caione <carlo.caione@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/types.h>
+
+/* Control register */
+#define SUN6I_LOSC_CTRL 0x0000
+#define SUN6I_LOSC_CTRL_ALM_DHMS_ACC BIT(9)
+#define SUN6I_LOSC_CTRL_RTC_HMS_ACC BIT(8)
+#define SUN6I_LOSC_CTRL_RTC_YMD_ACC BIT(7)
+#define SUN6I_LOSC_CTRL_ACC_MASK GENMASK(9, 7)
+
+/* RTC */
+#define SUN6I_RTC_YMD 0x0010
+#define SUN6I_RTC_HMS 0x0014
+
+/* Alarm 0 (counter) */
+#define SUN6I_ALRM_COUNTER 0x0020
+#define SUN6I_ALRM_CUR_VAL 0x0024
+#define SUN6I_ALRM_EN 0x0028
+#define SUN6I_ALRM_EN_CNT_EN BIT(0)
+#define SUN6I_ALRM_IRQ_EN 0x002c
+#define SUN6I_ALRM_IRQ_EN_CNT_IRQ_EN BIT(0)
+#define SUN6I_ALRM_IRQ_STA 0x0030
+#define SUN6I_ALRM_IRQ_STA_CNT_IRQ_PEND BIT(0)
+
+/* Alarm 1 (wall clock) */
+#define SUN6I_ALRM1_EN 0x0044
+#define SUN6I_ALRM1_IRQ_EN 0x0048
+#define SUN6I_ALRM1_IRQ_STA 0x004c
+#define SUN6I_ALRM1_IRQ_STA_WEEK_IRQ_PEND BIT(0)
+
+/* Alarm config */
+#define SUN6I_ALARM_CONFIG 0x0050
+#define SUN6I_ALARM_CONFIG_WAKEUP BIT(0)
+
+/*
+ * Get date values
+ */
+#define SUN6I_DATE_GET_DAY_VALUE(x) ((x) & 0x0000001f)
+#define SUN6I_DATE_GET_MON_VALUE(x) (((x) & 0x00000f00) >> 8)
+#define SUN6I_DATE_GET_YEAR_VALUE(x) (((x) & 0x003f0000) >> 16)
+#define SUN6I_LEAP_GET_VALUE(x) (((x) & 0x00400000) >> 22)
+
+/*
+ * Get time values
+ */
+#define SUN6I_TIME_GET_SEC_VALUE(x) ((x) & 0x0000003f)
+#define SUN6I_TIME_GET_MIN_VALUE(x) (((x) & 0x00003f00) >> 8)
+#define SUN6I_TIME_GET_HOUR_VALUE(x) (((x) & 0x001f0000) >> 16)
+
+/*
+ * Set date values
+ */
+#define SUN6I_DATE_SET_DAY_VALUE(x) ((x) & 0x0000001f)
+#define SUN6I_DATE_SET_MON_VALUE(x) ((x) << 8 & 0x00000f00)
+#define SUN6I_DATE_SET_YEAR_VALUE(x) ((x) << 16 & 0x003f0000)
+#define SUN6I_LEAP_SET_VALUE(x) ((x) << 22 & 0x00400000)
+
+/*
+ * Set time values
+ */
+#define SUN6I_TIME_SET_SEC_VALUE(x) ((x) & 0x0000003f)
+#define SUN6I_TIME_SET_MIN_VALUE(x) ((x) << 8 & 0x00003f00)
+#define SUN6I_TIME_SET_HOUR_VALUE(x) ((x) << 16 & 0x001f0000)
+
+/*
+ * The year parameter passed to the driver is usually an offset relative to
+ * the year 1900. This macro is used to convert this offset to another one
+ * relative to the minimum year allowed by the hardware.
+ *
+ * The year range is 1970 - 2033. This range is selected to match Allwinner's
+ * driver, even though it is somewhat limited.
+ */
+#define SUN6I_YEAR_MIN 1970
+#define SUN6I_YEAR_MAX 2033
+#define SUN6I_YEAR_OFF (SUN6I_YEAR_MIN - 1900)
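[Editor's note] A worked example of the year-offset conversion described above; the numbers follow directly from the macros and are not part of the patch:

    /* Illustration only: storing the year 2014 */
    int tm_year = 2014 - 1900;                 /* 114, as passed in struct rtc_time       */
    int hw_year = tm_year - SUN6I_YEAR_OFF;    /* 114 - 70 = 44, what the register holds  */
    /* sun6i_rtc_gettime() reverses this: tm_year = hw_year + SUN6I_YEAR_OFF */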
+
+struct sun6i_rtc_dev {
+ struct rtc_device *rtc;
+ struct device *dev;
+ void __iomem *base;
+ int irq;
+ unsigned long alarm;
+};
+
+static irqreturn_t sun6i_rtc_alarmirq(int irq, void *id)
+{
+ struct sun6i_rtc_dev *chip = (struct sun6i_rtc_dev *) id;
+ u32 val;
+
+ val = readl(chip->base + SUN6I_ALRM_IRQ_STA);
+
+ if (val & SUN6I_ALRM_IRQ_STA_CNT_IRQ_PEND) {
+ val |= SUN6I_ALRM_IRQ_STA_CNT_IRQ_PEND;
+ writel(val, chip->base + SUN6I_ALRM_IRQ_STA);
+
+ rtc_update_irq(chip->rtc, 1, RTC_AF | RTC_IRQF);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static void sun6i_rtc_setaie(int to, struct sun6i_rtc_dev *chip)
+{
+ u32 alrm_val = 0;
+ u32 alrm_irq_val = 0;
+ u32 alrm_wake_val = 0;
+
+ if (to) {
+ alrm_val = SUN6I_ALRM_EN_CNT_EN;
+ alrm_irq_val = SUN6I_ALRM_IRQ_EN_CNT_IRQ_EN;
+ alrm_wake_val = SUN6I_ALARM_CONFIG_WAKEUP;
+ } else {
+ writel(SUN6I_ALRM_IRQ_STA_CNT_IRQ_PEND,
+ chip->base + SUN6I_ALRM_IRQ_STA);
+ }
+
+ writel(alrm_val, chip->base + SUN6I_ALRM_EN);
+ writel(alrm_irq_val, chip->base + SUN6I_ALRM_IRQ_EN);
+ writel(alrm_wake_val, chip->base + SUN6I_ALARM_CONFIG);
+}
+
+static int sun6i_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
+{
+ struct sun6i_rtc_dev *chip = dev_get_drvdata(dev);
+ u32 date, time;
+
+ /*
+ * read again in case it changes
+ */
+ do {
+ date = readl(chip->base + SUN6I_RTC_YMD);
+ time = readl(chip->base + SUN6I_RTC_HMS);
+ } while ((date != readl(chip->base + SUN6I_RTC_YMD)) ||
+ (time != readl(chip->base + SUN6I_RTC_HMS)));
+
+ rtc_tm->tm_sec = SUN6I_TIME_GET_SEC_VALUE(time);
+ rtc_tm->tm_min = SUN6I_TIME_GET_MIN_VALUE(time);
+ rtc_tm->tm_hour = SUN6I_TIME_GET_HOUR_VALUE(time);
+
+ rtc_tm->tm_mday = SUN6I_DATE_GET_DAY_VALUE(date);
+ rtc_tm->tm_mon = SUN6I_DATE_GET_MON_VALUE(date);
+ rtc_tm->tm_year = SUN6I_DATE_GET_YEAR_VALUE(date);
+
+ rtc_tm->tm_mon -= 1;
+
+ /*
+ * switch from a SUN6I_YEAR_MIN-relative offset to
+ * a 1900-relative one
+ */
+ rtc_tm->tm_year += SUN6I_YEAR_OFF;
+
+ return rtc_valid_tm(rtc_tm);
+}
+
+static int sun6i_rtc_getalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
+{
+ struct sun6i_rtc_dev *chip = dev_get_drvdata(dev);
+ u32 alrm_st;
+ u32 alrm_en;
+
+ alrm_en = readl(chip->base + SUN6I_ALRM_IRQ_EN);
+ alrm_st = readl(chip->base + SUN6I_ALRM_IRQ_STA);
+ wkalrm->enabled = !!(alrm_en & SUN6I_ALRM_IRQ_EN_CNT_IRQ_EN);
+ wkalrm->pending = !!(alrm_st & SUN6I_ALRM_IRQ_STA_CNT_IRQ_PEND);
+ rtc_time_to_tm(chip->alarm, &wkalrm->time);
+
+ return 0;
+}
+
+static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
+{
+ struct sun6i_rtc_dev *chip = dev_get_drvdata(dev);
+ struct rtc_time *alrm_tm = &wkalrm->time;
+ struct rtc_time tm_now;
+ unsigned long time_now = 0;
+ unsigned long time_set = 0;
+ unsigned long time_gap = 0;
+ int ret = 0;
+
+ ret = sun6i_rtc_gettime(dev, &tm_now);
+ if (ret < 0) {
+ dev_err(dev, "Error in getting time\n");
+ return -EINVAL;
+ }
+
+ rtc_tm_to_time(alrm_tm, &time_set);
+ rtc_tm_to_time(&tm_now, &time_now);
+ if (time_set <= time_now) {
+ dev_err(dev, "Date to set in the past\n");
+ return -EINVAL;
+ }
+
+ time_gap = time_set - time_now;
+
+ if (time_gap > U32_MAX) {
+ dev_err(dev, "Date too far in the future\n");
+ return -EINVAL;
+ }
+
+ sun6i_rtc_setaie(0, chip);
+ writel(0, chip->base + SUN6I_ALRM_COUNTER);
+ usleep_range(100, 300);
+
+ writel(time_gap, chip->base + SUN6I_ALRM_COUNTER);
+ chip->alarm = time_set;
+
+ sun6i_rtc_setaie(wkalrm->enabled, chip);
+
+ return 0;
+}
+
+static int sun6i_rtc_wait(struct sun6i_rtc_dev *chip, int offset,
+ unsigned int mask, unsigned int ms_timeout)
+{
+ const unsigned long timeout = jiffies + msecs_to_jiffies(ms_timeout);
+ u32 reg;
+
+ do {
+ reg = readl(chip->base + offset);
+ reg &= mask;
+
+ if (!reg)
+ return 0;
+
+ } while (time_before(jiffies, timeout));
+
+ return -ETIMEDOUT;
+}
+
+static int sun6i_rtc_settime(struct device *dev, struct rtc_time *rtc_tm)
+{
+ struct sun6i_rtc_dev *chip = dev_get_drvdata(dev);
+ u32 date = 0;
+ u32 time = 0;
+ int year;
+
+ year = rtc_tm->tm_year + 1900;
+ if (year < SUN6I_YEAR_MIN || year > SUN6I_YEAR_MAX) {
+ dev_err(dev, "rtc only supports year in range %d - %d\n",
+ SUN6I_YEAR_MIN, SUN6I_YEAR_MAX);
+ return -EINVAL;
+ }
+
+ rtc_tm->tm_year -= SUN6I_YEAR_OFF;
+ rtc_tm->tm_mon += 1;
+
+ date = SUN6I_DATE_SET_DAY_VALUE(rtc_tm->tm_mday) |
+ SUN6I_DATE_SET_MON_VALUE(rtc_tm->tm_mon) |
+ SUN6I_DATE_SET_YEAR_VALUE(rtc_tm->tm_year);
+
+ if (is_leap_year(year))
+ date |= SUN6I_LEAP_SET_VALUE(1);
+
+ time = SUN6I_TIME_SET_SEC_VALUE(rtc_tm->tm_sec) |
+ SUN6I_TIME_SET_MIN_VALUE(rtc_tm->tm_min) |
+ SUN6I_TIME_SET_HOUR_VALUE(rtc_tm->tm_hour);
+
+ /* Check whether registers are writable */
+ if (sun6i_rtc_wait(chip, SUN6I_LOSC_CTRL,
+ SUN6I_LOSC_CTRL_ACC_MASK, 50)) {
+ dev_err(dev, "rtc is still busy.\n");
+ return -EBUSY;
+ }
+
+ writel(time, chip->base + SUN6I_RTC_HMS);
+
+ /*
+ * After writing the RTC HH-MM-SS register, the
+ * SUN6I_LOSC_CTRL_RTC_HMS_ACC bit is set and it will not
+ * be cleared until the real writing operation is finished
+ */
+
+ if (sun6i_rtc_wait(chip, SUN6I_LOSC_CTRL,
+ SUN6I_LOSC_CTRL_RTC_HMS_ACC, 50)) {
+ dev_err(dev, "Failed to set rtc time.\n");
+ return -ETIMEDOUT;
+ }
+
+ writel(date, chip->base + SUN6I_RTC_YMD);
+
+ /*
+ * After writing the RTC YY-MM-DD register, the
+ * SUN6I_LOSC_CTRL_RTC_YMD_ACC bit is set and it will not
+ * be cleared until the real writing operation is finished
+ */
+
+ if (sun6i_rtc_wait(chip, SUN6I_LOSC_CTRL,
+ SUN6I_LOSC_CTRL_RTC_YMD_ACC, 50)) {
+ dev_err(dev, "Failed to set rtc time.\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int sun6i_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct sun6i_rtc_dev *chip = dev_get_drvdata(dev);
+
+ if (!enabled)
+ sun6i_rtc_setaie(enabled, chip);
+
+ return 0;
+}
+
+static const struct rtc_class_ops sun6i_rtc_ops = {
+ .read_time = sun6i_rtc_gettime,
+ .set_time = sun6i_rtc_settime,
+ .read_alarm = sun6i_rtc_getalarm,
+ .set_alarm = sun6i_rtc_setalarm,
+ .alarm_irq_enable = sun6i_rtc_alarm_irq_enable
+};
+
+static int sun6i_rtc_probe(struct platform_device *pdev)
+{
+ struct sun6i_rtc_dev *chip;
+ struct resource *res;
+ int ret;
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, chip);
+ chip->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ chip->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(chip->base))
+ return PTR_ERR(chip->base);
+
+ chip->irq = platform_get_irq(pdev, 0);
+ if (chip->irq < 0) {
+ dev_err(&pdev->dev, "No IRQ resource\n");
+ return chip->irq;
+ }
+
+ ret = devm_request_irq(&pdev->dev, chip->irq, sun6i_rtc_alarmirq,
+ 0, dev_name(&pdev->dev), chip);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not request IRQ\n");
+ return ret;
+ }
+
+ /* clear the alarm counter value */
+ writel(0, chip->base + SUN6I_ALRM_COUNTER);
+
+ /* disable counter alarm */
+ writel(0, chip->base + SUN6I_ALRM_EN);
+
+ /* disable counter alarm interrupt */
+ writel(0, chip->base + SUN6I_ALRM_IRQ_EN);
+
+ /* disable week alarm */
+ writel(0, chip->base + SUN6I_ALRM1_EN);
+
+ /* disable week alarm interrupt */
+ writel(0, chip->base + SUN6I_ALRM1_IRQ_EN);
+
+ /* clear counter alarm pending interrupts */
+ writel(SUN6I_ALRM_IRQ_STA_CNT_IRQ_PEND,
+ chip->base + SUN6I_ALRM_IRQ_STA);
+
+ /* clear week alarm pending interrupts */
+ writel(SUN6I_ALRM1_IRQ_STA_WEEK_IRQ_PEND,
+ chip->base + SUN6I_ALRM1_IRQ_STA);
+
+ /* disable alarm wakeup */
+ writel(0, chip->base + SUN6I_ALARM_CONFIG);
+
+ chip->rtc = rtc_device_register("rtc-sun6i", &pdev->dev,
+ &sun6i_rtc_ops, THIS_MODULE);
+ if (IS_ERR(chip->rtc)) {
+ dev_err(&pdev->dev, "unable to register device\n");
+ return PTR_ERR(chip->rtc);
+ }
+
+ dev_info(&pdev->dev, "RTC enabled\n");
+
+ return 0;
+}
+
+static int sun6i_rtc_remove(struct platform_device *pdev)
+{
+ struct sun6i_rtc_dev *chip = platform_get_drvdata(pdev);
+
+ rtc_device_unregister(chip->rtc);
+
+ return 0;
+}
+
+static const struct of_device_id sun6i_rtc_dt_ids[] = {
+ { .compatible = "allwinner,sun6i-a31-rtc" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, sun6i_rtc_dt_ids);
+
+static struct platform_driver sun6i_rtc_driver = {
+ .probe = sun6i_rtc_probe,
+ .remove = sun6i_rtc_remove,
+ .driver = {
+ .name = "sun6i-rtc",
+ .of_match_table = sun6i_rtc_dt_ids,
+ },
+};
+
+module_platform_driver(sun6i_rtc_driver);
+
+MODULE_DESCRIPTION("sun6i RTC driver");
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index c854385..49e3f0c 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -1,5 +1,6 @@
menu "SOC (System On Chip) specific Drivers"
source "drivers/soc/qcom/Kconfig"
+source "drivers/soc/ti/Kconfig"
endmenu
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 3b1b95d..0d6e35d 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -4,3 +4,4 @@
obj-$(CONFIG_ARCH_QCOM) += qcom/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
+obj-$(CONFIG_SOC_TI) += ti/
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
new file mode 100644
index 0000000..7266b21
--- /dev/null
+++ b/drivers/soc/ti/Kconfig
@@ -0,0 +1,31 @@
+#
+# TI SOC drivers
+#
+menuconfig SOC_TI
+ bool "TI SOC drivers support"
+
+if SOC_TI
+
+config KEYSTONE_NAVIGATOR_QMSS
+ tristate "Keystone Queue Manager Sub System"
+ depends on ARCH_KEYSTONE
+ help
+ Say y here to enable support for the Keystone multicore Navigator
+ Queue Manager. The Queue Manager is a hardware module that is
+ responsible for accelerating management of the packet queues.
+ Packets are queued/de-queued by writing/reading a descriptor address
+ to/from a particular memory mapped location in the Queue Manager module.
+
+ If unsure, say N.
+
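[Editor's note] The help text above describes queueing as plain MMIO accesses of descriptor addresses. The sketch below is purely illustrative: the register offsets and helper names are assumptions and are not taken from this series (the real layout lives in knav_qmss_queue.c, which is not shown here):

    /* Hypothetical illustration of the push/pop idea described above */
    #define EXAMPLE_QUEUE_PUSH_OFF  0x0     /* assumed per-queue push register */
    #define EXAMPLE_QUEUE_POP_OFF   0x4     /* assumed per-queue pop register  */

    static void example_queue_push(void __iomem *qregs, u32 desc_dma)
    {
            writel(desc_dma, qregs + EXAMPLE_QUEUE_PUSH_OFF);   /* queue a descriptor    */
    }

    static u32 example_queue_pop(void __iomem *qregs)
    {
            return readl(qregs + EXAMPLE_QUEUE_POP_OFF);        /* de-queue a descriptor */
    }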
+config KEYSTONE_NAVIGATOR_DMA
+ tristate "TI Keystone Navigator Packet DMA support"
+ depends on ARCH_KEYSTONE
+ help
+ Say y to enable support for the Keystone Navigator Packet DMA on
+ the Keystone family of devices. It sets up the DMA channels for the
+ Queue Manager Sub System.
+
+ If unsure, say N.
+
+endif # SOC_TI
diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile
new file mode 100644
index 0000000..6bed611
--- /dev/null
+++ b/drivers/soc/ti/Makefile
@@ -0,0 +1,5 @@
+#
+# TI Keystone SOC drivers
+#
+obj-$(CONFIG_KEYSTONE_NAVIGATOR_QMSS) += knav_qmss_queue.o knav_qmss_acc.o
+obj-$(CONFIG_KEYSTONE_NAVIGATOR_DMA) += knav_dma.o
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
new file mode 100644
index 0000000..1726427
--- /dev/null
+++ b/drivers/soc/ti/knav_dma.c
@@ -0,0 +1,815 @@
+/*
+ * Copyright (C) 2014 Texas Instruments Incorporated
+ * Authors: Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * Sandeep Nair <sandeep_n@ti.com>
+ * Cyril Chemparathy <cyril@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/dma-direction.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_dma.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ti/knav_dma.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#define REG_MASK 0xffffffff
+
+#define DMA_LOOPBACK BIT(31)
+#define DMA_ENABLE BIT(31)
+#define DMA_TEARDOWN BIT(30)
+
+#define DMA_TX_FILT_PSWORDS BIT(29)
+#define DMA_TX_FILT_EINFO BIT(30)
+#define DMA_TX_PRIO_SHIFT 0
+#define DMA_RX_PRIO_SHIFT 16
+#define DMA_PRIO_MASK GENMASK(3, 0)
+#define DMA_PRIO_DEFAULT 0
+#define DMA_RX_TIMEOUT_DEFAULT 17500 /* cycles */
+#define DMA_RX_TIMEOUT_MASK GENMASK(16, 0)
+#define DMA_RX_TIMEOUT_SHIFT 0
+
+#define CHAN_HAS_EPIB BIT(30)
+#define CHAN_HAS_PSINFO BIT(29)
+#define CHAN_ERR_RETRY BIT(28)
+#define CHAN_PSINFO_AT_SOP BIT(25)
+#define CHAN_SOP_OFF_SHIFT 16
+#define CHAN_SOP_OFF_MASK GENMASK(9, 0)
+#define DESC_TYPE_SHIFT 26
+#define DESC_TYPE_MASK GENMASK(2, 0)
+
+/*
+ * QMGR & QNUM together make up 14 bits with QMGR as the 2 MSb's in the logical
+ * navigator cloud mapping scheme.
+ * using the 14bit physical queue numbers directly maps into this scheme.
+ */
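[Editor's note] Following the comment above, a 14-bit physical queue number splits into a 2-bit QMGR and a 12-bit QNUM; the snippet below is only an illustration derived from that comment:

    u32 phys_q = 0x2000;                /* example physical queue number          */
    u32 qmgr   = (phys_q >> 12) & 0x3;  /* two MSBs of the 14-bit number  -> 2    */
    u32 qnum   = phys_q & 0xfff;        /* the remaining 12 bits          -> 0    */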
+#define CHAN_QNUM_MASK GENMASK(14, 0)
+#define DMA_MAX_QMS 4
+#define DMA_TIMEOUT 1 /* msecs */
+#define DMA_INVALID_ID 0xffff
+
+struct reg_global {
+ u32 revision;
+ u32 perf_control;
+ u32 emulation_control;
+ u32 priority_control;
+ u32 qm_base_address[DMA_MAX_QMS];
+};
+
+struct reg_chan {
+ u32 control;
+ u32 mode;
+ u32 __rsvd[6];
+};
+
+struct reg_tx_sched {
+ u32 prio;
+};
+
+struct reg_rx_flow {
+ u32 control;
+ u32 tags;
+ u32 tag_sel;
+ u32 fdq_sel[2];
+ u32 thresh[3];
+};
+
+struct knav_dma_pool_device {
+ struct device *dev;
+ struct list_head list;
+};
+
+struct knav_dma_device {
+ bool loopback, enable_all;
+ unsigned tx_priority, rx_priority, rx_timeout;
+ unsigned logical_queue_managers;
+ unsigned qm_base_address[DMA_MAX_QMS];
+ struct reg_global __iomem *reg_global;
+ struct reg_chan __iomem *reg_tx_chan;
+ struct reg_rx_flow __iomem *reg_rx_flow;
+ struct reg_chan __iomem *reg_rx_chan;
+ struct reg_tx_sched __iomem *reg_tx_sched;
+ unsigned max_rx_chan, max_tx_chan;
+ unsigned max_rx_flow;
+ char name[32];
+ atomic_t ref_count;
+ struct list_head list;
+ struct list_head chan_list;
+ spinlock_t lock;
+};
+
+struct knav_dma_chan {
+ enum dma_transfer_direction direction;
+ struct knav_dma_device *dma;
+ atomic_t ref_count;
+
+ /* registers */
+ struct reg_chan __iomem *reg_chan;
+ struct reg_tx_sched __iomem *reg_tx_sched;
+ struct reg_rx_flow __iomem *reg_rx_flow;
+
+ /* configuration stuff */
+ unsigned channel, flow;
+ struct knav_dma_cfg cfg;
+ struct list_head list;
+ spinlock_t lock;
+};
+
+#define chan_number(ch) ((ch->direction == DMA_MEM_TO_DEV) ? \
+ ch->channel : ch->flow)
+
+static struct knav_dma_pool_device *kdev;
+
+static bool check_config(struct knav_dma_chan *chan, struct knav_dma_cfg *cfg)
+{
+ if (!memcmp(&chan->cfg, cfg, sizeof(*cfg)))
+ return true;
+ else
+ return false;
+}
+
+static int chan_start(struct knav_dma_chan *chan,
+ struct knav_dma_cfg *cfg)
+{
+ u32 v = 0;
+
+ spin_lock(&chan->lock);
+ if ((chan->direction == DMA_MEM_TO_DEV) && chan->reg_chan) {
+ if (cfg->u.tx.filt_pswords)
+ v |= DMA_TX_FILT_PSWORDS;
+ if (cfg->u.tx.filt_einfo)
+ v |= DMA_TX_FILT_EINFO;
+ writel_relaxed(v, &chan->reg_chan->mode);
+ writel_relaxed(DMA_ENABLE, &chan->reg_chan->control);
+ }
+
+ if (chan->reg_tx_sched)
+ writel_relaxed(cfg->u.tx.priority, &chan->reg_tx_sched->prio);
+
+ if (chan->reg_rx_flow) {
+ v = 0;
+
+ if (cfg->u.rx.einfo_present)
+ v |= CHAN_HAS_EPIB;
+ if (cfg->u.rx.psinfo_present)
+ v |= CHAN_HAS_PSINFO;
+ if (cfg->u.rx.err_mode == DMA_RETRY)
+ v |= CHAN_ERR_RETRY;
+ v |= (cfg->u.rx.desc_type & DESC_TYPE_MASK) << DESC_TYPE_SHIFT;
+ if (cfg->u.rx.psinfo_at_sop)
+ v |= CHAN_PSINFO_AT_SOP;
+ v |= (cfg->u.rx.sop_offset & CHAN_SOP_OFF_MASK)
+ << CHAN_SOP_OFF_SHIFT;
+ v |= cfg->u.rx.dst_q & CHAN_QNUM_MASK;
+
+ writel_relaxed(v, &chan->reg_rx_flow->control);
+ writel_relaxed(0, &chan->reg_rx_flow->tags);
+ writel_relaxed(0, &chan->reg_rx_flow->tag_sel);
+
+ v = cfg->u.rx.fdq[0] << 16;
+ v |= cfg->u.rx.fdq[1] & CHAN_QNUM_MASK;
+ writel_relaxed(v, &chan->reg_rx_flow->fdq_sel[0]);
+
+ v = cfg->u.rx.fdq[2] << 16;
+ v |= cfg->u.rx.fdq[3] & CHAN_QNUM_MASK;
+ writel_relaxed(v, &chan->reg_rx_flow->fdq_sel[1]);
+
+ writel_relaxed(0, &chan->reg_rx_flow->thresh[0]);
+ writel_relaxed(0, &chan->reg_rx_flow->thresh[1]);
+ writel_relaxed(0, &chan->reg_rx_flow->thresh[2]);
+ }
+
+ /* Keep a copy of the cfg */
+ memcpy(&chan->cfg, cfg, sizeof(*cfg));
+ spin_unlock(&chan->lock);
+
+ return 0;
+}
+
+static int chan_teardown(struct knav_dma_chan *chan)
+{
+ unsigned long end, value;
+
+ if (!chan->reg_chan)
+ return 0;
+
+ /* indicate teardown */
+ writel_relaxed(DMA_TEARDOWN, &chan->reg_chan->control);
+
+ /* wait for the dma to shut itself down */
+ end = jiffies + msecs_to_jiffies(DMA_TIMEOUT);
+ do {
+ value = readl_relaxed(&chan->reg_chan->control);
+ if ((value & DMA_ENABLE) == 0)
+ break;
+ } while (time_after(end, jiffies));
+
+ if (readl_relaxed(&chan->reg_chan->control) & DMA_ENABLE) {
+ dev_err(kdev->dev, "timeout waiting for teardown\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void chan_stop(struct knav_dma_chan *chan)
+{
+ spin_lock(&chan->lock);
+ if (chan->reg_rx_flow) {
+ /* first detach fdqs, starve out the flow */
+ writel_relaxed(0, &chan->reg_rx_flow->fdq_sel[0]);
+ writel_relaxed(0, &chan->reg_rx_flow->fdq_sel[1]);
+ writel_relaxed(0, &chan->reg_rx_flow->thresh[0]);
+ writel_relaxed(0, &chan->reg_rx_flow->thresh[1]);
+ writel_relaxed(0, &chan->reg_rx_flow->thresh[2]);
+ }
+
+ /* teardown the dma channel */
+ chan_teardown(chan);
+
+ /* then disconnect the completion side */
+ if (chan->reg_rx_flow) {
+ writel_relaxed(0, &chan->reg_rx_flow->control);
+ writel_relaxed(0, &chan->reg_rx_flow->tags);
+ writel_relaxed(0, &chan->reg_rx_flow->tag_sel);
+ }
+
+ memset(&chan->cfg, 0, sizeof(struct knav_dma_cfg));
+ spin_unlock(&chan->lock);
+
+ dev_dbg(kdev->dev, "channel stopped\n");
+}
+
+static void dma_hw_enable_all(struct knav_dma_device *dma)
+{
+ int i;
+
+ for (i = 0; i < dma->max_tx_chan; i++) {
+ writel_relaxed(0, &dma->reg_tx_chan[i].mode);
+ writel_relaxed(DMA_ENABLE, &dma->reg_tx_chan[i].control);
+ }
+}
+
+
+static void knav_dma_hw_init(struct knav_dma_device *dma)
+{
+ unsigned v;
+ int i;
+
+ spin_lock(&dma->lock);
+ v = dma->loopback ? DMA_LOOPBACK : 0;
+ writel_relaxed(v, &dma->reg_global->emulation_control);
+
+ v = readl_relaxed(&dma->reg_global->perf_control);
+ v |= ((dma->rx_timeout & DMA_RX_TIMEOUT_MASK) << DMA_RX_TIMEOUT_SHIFT);
+ writel_relaxed(v, &dma->reg_global->perf_control);
+
+ v = ((dma->tx_priority << DMA_TX_PRIO_SHIFT) |
+ (dma->rx_priority << DMA_RX_PRIO_SHIFT));
+
+ writel_relaxed(v, &dma->reg_global->priority_control);
+
+ /* Always enable all Rx channels. Rx paths are managed using flows */
+ for (i = 0; i < dma->max_rx_chan; i++)
+ writel_relaxed(DMA_ENABLE, &dma->reg_rx_chan[i].control);
+
+ for (i = 0; i < dma->logical_queue_managers; i++)
+ writel_relaxed(dma->qm_base_address[i],
+ &dma->reg_global->qm_base_address[i]);
+ spin_unlock(&dma->lock);
+}
+
+static void knav_dma_hw_destroy(struct knav_dma_device *dma)
+{
+ int i;
+ unsigned v;
+
+ spin_lock(&dma->lock);
+ v = ~DMA_ENABLE & REG_MASK;
+
+ for (i = 0; i < dma->max_rx_chan; i++)
+ writel_relaxed(v, &dma->reg_rx_chan[i].control);
+
+ for (i = 0; i < dma->max_tx_chan; i++)
+ writel_relaxed(v, &dma->reg_tx_chan[i].control);
+ spin_unlock(&dma->lock);
+}
+
+static void dma_debug_show_channels(struct seq_file *s,
+ struct knav_dma_chan *chan)
+{
+ int i;
+
+ seq_printf(s, "\t%s %d:\t",
+ ((chan->direction == DMA_MEM_TO_DEV) ? "tx chan" : "rx flow"),
+ chan_number(chan));
+
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ seq_printf(s, "einfo - %d, pswords - %d, priority - %d\n",
+ chan->cfg.u.tx.filt_einfo,
+ chan->cfg.u.tx.filt_pswords,
+ chan->cfg.u.tx.priority);
+ } else {
+ seq_printf(s, "einfo - %d, psinfo - %d, desc_type - %d\n",
+ chan->cfg.u.rx.einfo_present,
+ chan->cfg.u.rx.psinfo_present,
+ chan->cfg.u.rx.desc_type);
+ seq_printf(s, "\t\t\tdst_q: [%d], thresh: %d fdq: ",
+ chan->cfg.u.rx.dst_q,
+ chan->cfg.u.rx.thresh);
+ for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; i++)
+ seq_printf(s, "[%d]", chan->cfg.u.rx.fdq[i]);
+ seq_printf(s, "\n");
+ }
+}
+
+static void dma_debug_show_devices(struct seq_file *s,
+ struct knav_dma_device *dma)
+{
+ struct knav_dma_chan *chan;
+
+ list_for_each_entry(chan, &dma->chan_list, list) {
+ if (atomic_read(&chan->ref_count))
+ dma_debug_show_channels(s, chan);
+ }
+}
+
+static int dma_debug_show(struct seq_file *s, void *v)
+{
+ struct knav_dma_device *dma;
+
+ list_for_each_entry(dma, &kdev->list, list) {
+ if (atomic_read(&dma->ref_count)) {
+ seq_printf(s, "%s : max_tx_chan: (%d), max_rx_flows: (%d)\n",
+ dma->name, dma->max_tx_chan, dma->max_rx_flow);
+ dma_debug_show_devices(s, dma);
+ }
+ }
+
+ return 0;
+}
+
+static int knav_dma_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dma_debug_show, NULL);
+}
+
+static const struct file_operations knav_dma_debug_ops = {
+ .open = knav_dma_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int of_channel_match_helper(struct device_node *np, const char *name,
+ const char **dma_instance)
+{
+ struct of_phandle_args args;
+ struct device_node *dma_node;
+ int index;
+
+ dma_node = of_parse_phandle(np, "ti,navigator-dmas", 0);
+ if (!dma_node)
+ return -ENODEV;
+
+ *dma_instance = dma_node->name;
+ index = of_property_match_string(np, "ti,navigator-dma-names", name);
+ if (index < 0) {
+ dev_err(kdev->dev, "No 'ti,navigator-dma-names' propery\n");
+ return -ENODEV;
+ }
+
+ if (of_parse_phandle_with_fixed_args(np, "ti,navigator-dmas",
+ 1, index, &args)) {
+ dev_err(kdev->dev, "Missing the pahndle args name %s\n", name);
+ return -ENODEV;
+ }
+
+ if (args.args[0] < 0) {
+ dev_err(kdev->dev, "Missing args for %s\n", name);
+ return -ENODEV;
+ }
+
+ return args.args[0];
+}
+
+/**
+ * knav_dma_open_channel() - try to setup an exclusive slave channel
+ * @dev: pointer to client device structure
+ * @name: slave channel name
+ * @config: dma configuration parameters
+ *
+ * Returns a pointer to the requested DMA channel on success, or an
+ * error-encoded pointer on failure.
+ */
+void *knav_dma_open_channel(struct device *dev, const char *name,
+ struct knav_dma_cfg *config)
+{
+ struct knav_dma_chan *chan;
+ struct knav_dma_device *dma;
+ bool found = false;
+ int chan_num = -1;
+ const char *instance;
+
+ if (!kdev) {
+ pr_err("keystone-navigator-dma driver not registered\n");
+ return (void *)-EINVAL;
+ }
+
+ chan_num = of_channel_match_helper(dev->of_node, name, &instance);
+ if (chan_num < 0) {
+ dev_err(kdev->dev, "No DMA instace with name %s\n", name);
+ return (void *)-EINVAL;
+ }
+
+ dev_dbg(kdev->dev, "initializing %s channel %d from DMA %s\n",
+ config->direction == DMA_MEM_TO_DEV ? "transmit" :
+ config->direction == DMA_DEV_TO_MEM ? "receive" :
+ "unknown", chan_num, instance);
+
+ if (config->direction != DMA_MEM_TO_DEV &&
+ config->direction != DMA_DEV_TO_MEM) {
+ dev_err(kdev->dev, "bad direction\n");
+ return (void *)-EINVAL;
+ }
+
+ /* Look for correct dma instance */
+ list_for_each_entry(dma, &kdev->list, list) {
+ if (!strcmp(dma->name, instance)) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ dev_err(kdev->dev, "No DMA instace with name %s\n", instance);
+ return (void *)-EINVAL;
+ }
+
+ /* Look for correct dma channel from dma instance */
+ found = false;
+ list_for_each_entry(chan, &dma->chan_list, list) {
+ if (config->direction == DMA_MEM_TO_DEV) {
+ if (chan->channel == chan_num) {
+ found = true;
+ break;
+ }
+ } else {
+ if (chan->flow == chan_num) {
+ found = true;
+ break;
+ }
+ }
+ }
+ if (!found) {
+ dev_err(kdev->dev, "channel %d is not in DMA %s\n",
+ chan_num, instance);
+ return (void *)-EINVAL;
+ }
+
+ if (atomic_read(&chan->ref_count) >= 1) {
+ if (!check_config(chan, config)) {
+ dev_err(kdev->dev, "channel %d config miss-match\n",
+ chan_num);
+ return (void *)-EINVAL;
+ }
+ }
+
+ if (atomic_inc_return(&chan->dma->ref_count) <= 1)
+ knav_dma_hw_init(chan->dma);
+
+ if (atomic_inc_return(&chan->ref_count) <= 1)
+ chan_start(chan, config);
+
+ dev_dbg(kdev->dev, "channel %d opened from DMA %s\n",
+ chan_num, instance);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(knav_dma_open_channel);
+
+/**
+ * knav_dma_close_channel() - Destroy a dma channel
+ * @channel: dma channel handle
+ */
+void knav_dma_close_channel(void *channel)
+{
+ struct knav_dma_chan *chan = channel;
+
+ if (!kdev) {
+ pr_err("keystone-navigator-dma driver not registered\n");
+ return;
+ }
+
+ if (atomic_dec_return(&chan->ref_count) <= 0)
+ chan_stop(chan);
+
+ if (atomic_dec_return(&chan->dma->ref_count) <= 0)
+ knav_dma_hw_destroy(chan->dma);
+
+ dev_dbg(kdev->dev, "channel %d or flow %d closed from DMA %s\n",
+ chan->channel, chan->flow, chan->dma->name);
+}
+EXPORT_SYMBOL_GPL(knav_dma_close_channel);
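[Editor's note] A minimal client-side sketch of the two exported calls above. The function name, device pointer, channel name and configuration values are made up for illustration; only fields and functions that appear in this file are used:

    /* Hypothetical client code; assumes the client's DT node carries matching
     * "ti,navigator-dmas" / "ti,navigator-dma-names" entries. */
    static int example_open_tx(struct device *dev)
    {
            struct knav_dma_cfg cfg = { };
            void *chan;

            cfg.direction         = DMA_MEM_TO_DEV;
            cfg.u.tx.filt_einfo   = false;
            cfg.u.tx.filt_pswords = false;
            cfg.u.tx.priority     = 0;      /* arbitrary example priority */

            chan = knav_dma_open_channel(dev, "nettx", &cfg);  /* "nettx" is a made-up name */
            if (IS_ERR(chan))
                    return PTR_ERR(chan);

            /* ... hand descriptors to the QMSS as usual ... */

            knav_dma_close_channel(chan);
            return 0;
    }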
+
+static void __iomem *pktdma_get_regs(struct knav_dma_device *dma,
+ struct device_node *node,
+ unsigned index, resource_size_t *_size)
+{
+ struct device *dev = kdev->dev;
+ struct resource res;
+ void __iomem *regs;
+ int ret;
+
+ ret = of_address_to_resource(node, index, &res);
+ if (ret) {
+ dev_err(dev, "Can't translate of node(%s) address for index(%d)\n",
+ node->name, index);
+ return ERR_PTR(ret);
+ }
+
+ regs = devm_ioremap_resource(kdev->dev, &res);
+ if (IS_ERR(regs))
+ dev_err(dev, "Failed to map register base for index(%d) node(%s)\n",
+ index, node->name);
+ if (_size)
+ *_size = resource_size(&res);
+
+ return regs;
+}
+
+static int pktdma_init_rx_chan(struct knav_dma_chan *chan, u32 flow)
+{
+ struct knav_dma_device *dma = chan->dma;
+
+ chan->flow = flow;
+ chan->reg_rx_flow = dma->reg_rx_flow + flow;
+ chan->channel = DMA_INVALID_ID;
+ dev_dbg(kdev->dev, "rx flow(%d) (%p)\n", chan->flow, chan->reg_rx_flow);
+
+ return 0;
+}
+
+static int pktdma_init_tx_chan(struct knav_dma_chan *chan, u32 channel)
+{
+ struct knav_dma_device *dma = chan->dma;
+
+ chan->channel = channel;
+ chan->reg_chan = dma->reg_tx_chan + channel;
+ chan->reg_tx_sched = dma->reg_tx_sched + channel;
+ chan->flow = DMA_INVALID_ID;
+ dev_dbg(kdev->dev, "tx channel(%d) (%p)\n", chan->channel, chan->reg_chan);
+
+ return 0;
+}
+
+static int pktdma_init_chan(struct knav_dma_device *dma,
+ enum dma_transfer_direction dir,
+ unsigned chan_num)
+{
+ struct device *dev = kdev->dev;
+ struct knav_dma_chan *chan;
+ int ret = -EINVAL;
+
+ chan = devm_kzalloc(dev, sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&chan->list);
+ chan->dma = dma;
+ chan->direction = DMA_NONE;
+ atomic_set(&chan->ref_count, 0);
+ spin_lock_init(&chan->lock);
+
+ if (dir == DMA_MEM_TO_DEV) {
+ chan->direction = dir;
+ ret = pktdma_init_tx_chan(chan, chan_num);
+ } else if (dir == DMA_DEV_TO_MEM) {
+ chan->direction = dir;
+ ret = pktdma_init_rx_chan(chan, chan_num);
+ } else {
+ dev_err(dev, "channel(%d) direction unknown\n", chan_num);
+ }
+
+ list_add_tail(&chan->list, &dma->chan_list);
+
+ return ret;
+}
+
+static int dma_init(struct device_node *cloud, struct device_node *dma_node)
+{
+ unsigned max_tx_chan, max_rx_chan, max_rx_flow, max_tx_sched;
+ struct device_node *node = dma_node;
+ struct knav_dma_device *dma;
+ int ret, len, num_chan = 0;
+ resource_size_t size;
+ u32 timeout;
+ u32 i;
+
+ dma = devm_kzalloc(kdev->dev, sizeof(*dma), GFP_KERNEL);
+ if (!dma) {
+ dev_err(kdev->dev, "could not allocate driver mem\n");
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&dma->list);
+ INIT_LIST_HEAD(&dma->chan_list);
+
+ if (!of_find_property(cloud, "ti,navigator-cloud-address", &len)) {
+ dev_err(kdev->dev, "unspecified navigator cloud addresses\n");
+ return -ENODEV;
+ }
+
+ dma->logical_queue_managers = len / sizeof(u32);
+ if (dma->logical_queue_managers > DMA_MAX_QMS) {
+ dev_warn(kdev->dev, "too many queue mgrs(>%d) rest ignored\n",
+ dma->logical_queue_managers);
+ dma->logical_queue_managers = DMA_MAX_QMS;
+ }
+
+ ret = of_property_read_u32_array(cloud, "ti,navigator-cloud-address",
+ dma->qm_base_address,
+ dma->logical_queue_managers);
+ if (ret) {
+ dev_err(kdev->dev, "invalid navigator cloud addresses\n");
+ return -ENODEV;
+ }
+
+ dma->reg_global = pktdma_get_regs(dma, node, 0, &size);
+ if (IS_ERR(dma->reg_global))
+ return PTR_ERR(dma->reg_global);
+ if (size < sizeof(struct reg_global)) {
+ dev_err(kdev->dev, "bad size %pa for global regs\n", &size);
+ return -ENODEV;
+ }
+
+ dma->reg_tx_chan = pktdma_get_regs(dma, node, 1, &size);
+ if (IS_ERR(dma->reg_tx_chan))
+ return PTR_ERR(dma->reg_tx_chan);
+
+ max_tx_chan = size / sizeof(struct reg_chan);
+ dma->reg_rx_chan = pktdma_get_regs(dma, node, 2, &size);
+ if (IS_ERR(dma->reg_rx_chan))
+ return PTR_ERR(dma->reg_rx_chan);
+
+ max_rx_chan = size / sizeof(struct reg_chan);
+ dma->reg_tx_sched = pktdma_get_regs(dma, node, 3, &size);
+ if (IS_ERR(dma->reg_tx_sched))
+ return PTR_ERR(dma->reg_tx_sched);
+
+ max_tx_sched = size / sizeof(struct reg_tx_sched);
+ dma->reg_rx_flow = pktdma_get_regs(dma, node, 4, &size);
+ if (IS_ERR(dma->reg_rx_flow))
+ return PTR_ERR(dma->reg_rx_flow);
+
+ max_rx_flow = size / sizeof(struct reg_rx_flow);
+ dma->rx_priority = DMA_PRIO_DEFAULT;
+ dma->tx_priority = DMA_PRIO_DEFAULT;
+
+ dma->enable_all = (of_get_property(node, "ti,enable-all", NULL) != NULL);
+ dma->loopback = (of_get_property(node, "ti,loop-back", NULL) != NULL);
+
+ ret = of_property_read_u32(node, "ti,rx-retry-timeout", &timeout);
+ if (ret < 0) {
+ dev_dbg(kdev->dev, "unspecified rx timeout using value %d\n",
+ DMA_RX_TIMEOUT_DEFAULT);
+ timeout = DMA_RX_TIMEOUT_DEFAULT;
+ }
+
+ dma->rx_timeout = timeout;
+ dma->max_rx_chan = max_rx_chan;
+ dma->max_rx_flow = max_rx_flow;
+ dma->max_tx_chan = min(max_tx_chan, max_tx_sched);
+ atomic_set(&dma->ref_count, 0);
+ strcpy(dma->name, node->name);
+ spin_lock_init(&dma->lock);
+
+ for (i = 0; i < dma->max_tx_chan; i++) {
+ if (pktdma_init_chan(dma, DMA_MEM_TO_DEV, i) >= 0)
+ num_chan++;
+ }
+
+ for (i = 0; i < dma->max_rx_flow; i++) {
+ if (pktdma_init_chan(dma, DMA_DEV_TO_MEM, i) >= 0)
+ num_chan++;
+ }
+
+ list_add_tail(&dma->list, &kdev->list);
+
+ /*
+ * For DSP software use cases or userspace transport software, set up all
+ * the DMA hardware resources.
+ */
+ if (dma->enable_all) {
+ atomic_inc(&dma->ref_count);
+ knav_dma_hw_init(dma);
+ dma_hw_enable_all(dma);
+ }
+
+ dev_info(kdev->dev, "DMA %s registered %d logical channels, flows %d, tx chans: %d, rx chans: %d%s\n",
+ dma->name, num_chan, dma->max_rx_flow,
+ dma->max_tx_chan, dma->max_rx_chan,
+ dma->loopback ? ", loopback" : "");
+
+ return 0;
+}
+
+static int knav_dma_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *child;
+ int ret = 0;
+
+ if (!node) {
+ dev_err(&pdev->dev, "could not find device info\n");
+ return -EINVAL;
+ }
+
+ kdev = devm_kzalloc(dev,
+ sizeof(struct knav_dma_pool_device), GFP_KERNEL);
+ if (!kdev) {
+ dev_err(dev, "could not allocate driver mem\n");
+ return -ENOMEM;
+ }
+
+ kdev->dev = dev;
+ INIT_LIST_HEAD(&kdev->list);
+
+ pm_runtime_enable(kdev->dev);
+ ret = pm_runtime_get_sync(kdev->dev);
+ if (ret < 0) {
+ dev_err(kdev->dev, "unable to enable pktdma, err %d\n", ret);
+ return ret;
+ }
+
+ /* Initialise all packet dmas */
+ for_each_child_of_node(node, child) {
+ ret = dma_init(node, child);
+ if (ret) {
+ dev_err(&pdev->dev, "init failed with %d\n", ret);
+ break;
+ }
+ }
+
+ if (list_empty(&kdev->list)) {
+ dev_err(dev, "no valid dma instance\n");
+ return -ENODEV;
+ }
+
+ debugfs_create_file("knav_dma", S_IFREG | S_IRUGO, NULL, NULL,
+ &knav_dma_debug_ops);
+
+ return ret;
+}
+
+static int knav_dma_remove(struct platform_device *pdev)
+{
+ struct knav_dma_device *dma;
+
+ list_for_each_entry(dma, &kdev->list, list) {
+ if (atomic_dec_return(&dma->ref_count) == 0)
+ knav_dma_hw_destroy(dma);
+ }
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static struct of_device_id of_match[] = {
+ { .compatible = "ti,keystone-navigator-dma", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, of_match);
+
+static struct platform_driver knav_dma_driver = {
+ .probe = knav_dma_probe,
+ .remove = knav_dma_remove,
+ .driver = {
+ .name = "keystone-navigator-dma",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match,
+ },
+};
+module_platform_driver(knav_dma_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI Keystone Navigator Packet DMA driver");
+MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
+MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");
diff --git a/drivers/soc/ti/knav_qmss.h b/drivers/soc/ti/knav_qmss.h
new file mode 100644
index 0000000..bc9dcc8
--- /dev/null
+++ b/drivers/soc/ti/knav_qmss.h
@@ -0,0 +1,386 @@
+/*
+ * Keystone Navigator QMSS driver internal header
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Sandeep Nair <sandeep_n@ti.com>
+ * Cyril Chemparathy <cyril@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __KNAV_QMSS_H__
+#define __KNAV_QMSS_H__
+
+#define THRESH_GTE BIT(7)
+#define THRESH_LT 0
+
+#define PDSP_CTRL_PC_MASK 0xffff0000
+#define PDSP_CTRL_SOFT_RESET BIT(0)
+#define PDSP_CTRL_ENABLE BIT(1)
+#define PDSP_CTRL_RUNNING BIT(15)
+
+#define ACC_MAX_CHANNEL 48
+#define ACC_DEFAULT_PERIOD 25 /* usecs */
+
+#define ACC_CHANNEL_INT_BASE 2
+
+#define ACC_LIST_ENTRY_TYPE 1
+#define ACC_LIST_ENTRY_WORDS (1 << ACC_LIST_ENTRY_TYPE)
+#define ACC_LIST_ENTRY_QUEUE_IDX 0
+#define ACC_LIST_ENTRY_DESC_IDX (ACC_LIST_ENTRY_WORDS - 1)
+
+#define ACC_CMD_DISABLE_CHANNEL 0x80
+#define ACC_CMD_ENABLE_CHANNEL 0x81
+#define ACC_CFG_MULTI_QUEUE BIT(21)
+
+#define ACC_INTD_OFFSET_EOI (0x0010)
+#define ACC_INTD_OFFSET_COUNT(ch) (0x0300 + 4 * (ch))
+#define ACC_INTD_OFFSET_STATUS(ch) (0x0200 + 4 * ((ch) / 32))
+
+#define RANGE_MAX_IRQS 64
+
+#define ACC_DESCS_MAX SZ_1K
+#define ACC_DESCS_MASK (ACC_DESCS_MAX - 1)
+#define DESC_SIZE_MASK 0xful
+#define DESC_PTR_MASK (~DESC_SIZE_MASK)
+
+#define KNAV_NAME_SIZE 32
+
+enum knav_acc_result {
+ ACC_RET_IDLE,
+ ACC_RET_SUCCESS,
+ ACC_RET_INVALID_COMMAND,
+ ACC_RET_INVALID_CHANNEL,
+ ACC_RET_INACTIVE_CHANNEL,
+ ACC_RET_ACTIVE_CHANNEL,
+ ACC_RET_INVALID_QUEUE,
+ ACC_RET_INVALID_RET,
+};
+
+struct knav_reg_config {
+ u32 revision;
+ u32 __pad1;
+ u32 divert;
+ u32 link_ram_base0;
+ u32 link_ram_size0;
+ u32 link_ram_base1;
+ u32 __pad2[2];
+ u32 starvation[0];
+};
+
+struct knav_reg_region {
+ u32 base;
+ u32 start_index;
+ u32 size_count;
+ u32 __pad;
+};
+
+struct knav_reg_pdsp_regs {
+ u32 control;
+ u32 status;
+ u32 cycle_count;
+ u32 stall_count;
+};
+
+struct knav_reg_acc_command {
+ u32 command;
+ u32 queue_mask;
+ u32 list_phys;
+ u32 queue_num;
+ u32 timer_config;
+};
+
+struct knav_link_ram_block {
+ dma_addr_t phys;
+ void *virt;
+ size_t size;
+};
+
+struct knav_acc_info {
+ u32 pdsp_id;
+ u32 start_channel;
+ u32 list_entries;
+ u32 pacing_mode;
+ u32 timer_count;
+ int mem_size;
+ int list_size;
+ struct knav_pdsp_info *pdsp;
+};
+
+struct knav_acc_channel {
+ u32 channel;
+ u32 list_index;
+ u32 open_mask;
+ u32 *list_cpu[2];
+ dma_addr_t list_dma[2];
+ char name[KNAV_NAME_SIZE];
+ atomic_t retrigger_count;
+};
+
+struct knav_pdsp_info {
+ const char *name;
+ struct knav_reg_pdsp_regs __iomem *regs;
+ union {
+ void __iomem *command;
+ struct knav_reg_acc_command __iomem *acc_command;
+ u32 __iomem *qos_command;
+ };
+ void __iomem *intd;
+ u32 __iomem *iram;
+ const char *firmware;
+ u32 id;
+ struct list_head list;
+};
+
+struct knav_qmgr_info {
+ unsigned start_queue;
+ unsigned num_queues;
+ struct knav_reg_config __iomem *reg_config;
+ struct knav_reg_region __iomem *reg_region;
+ struct knav_reg_queue __iomem *reg_push, *reg_pop, *reg_peek;
+ void __iomem *reg_status;
+ struct list_head list;
+};
+
+#define KNAV_NUM_LINKRAM 2
+
+/**
+ * struct knav_queue_stats: queue statistics
+ * @pushes: number of push operations
+ * @pops: number of pop operations
+ * @push_errors: number of push errors
+ * @pop_errors: number of pop errors
+ * @notifies: notifier counts
+ */
+struct knav_queue_stats {
+ atomic_t pushes;
+ atomic_t pops;
+ atomic_t push_errors;
+ atomic_t pop_errors;
+ atomic_t notifies;
+};
+
+/**
+ * struct knav_reg_queue: queue registers
+ * @entry_count: valid entries in the queue
+ * @byte_count: total byte count in the queue
+ * @packet_size: packet size for the queue
+ * @ptr_size_thresh: packet pointer size threshold
+ */
+struct knav_reg_queue {
+ u32 entry_count;
+ u32 byte_count;
+ u32 packet_size;
+ u32 ptr_size_thresh;
+};
+
+/**
+ * struct knav_region: qmss region info
+ * @dma_start, dma_end: start and end dma address
+ * @virt_start, virt_end: start and end virtual address
+ * @desc_size: descriptor size
+ * @used_desc: consumed descriptors
+ * @id: region number
+ * @num_desc: total descriptors
+ * @link_index: index of the first descriptor
+ * @name: region name
+ * @list: instance in the device's region list
+ * @pools: list of descriptor pools in the region
+ */
+struct knav_region {
+ dma_addr_t dma_start, dma_end;
+ void *virt_start, *virt_end;
+ unsigned desc_size;
+ unsigned used_desc;
+ unsigned id;
+ unsigned num_desc;
+ unsigned link_index;
+ const char *name;
+ struct list_head list;
+ struct list_head pools;
+};
+
+/**
+ * struct knav_pool: qmss pools
+ * @dev: device pointer
+ * @region: qmss region info
+ * @queue: queue registers
+ * @kdev: qmss device pointer
+ * @region_offset: offset from the base
+ * @num_desc: total descriptors
+ * @desc_size: descriptor size
+ * @region_id: region number
+ * @name: pool name
+ * @list: list head
+ * @region_inst: instance in the region's pool list
+ */
+struct knav_pool {
+ struct device *dev;
+ struct knav_region *region;
+ struct knav_queue *queue;
+ struct knav_device *kdev;
+ int region_offset;
+ int num_desc;
+ int desc_size;
+ int region_id;
+ const char *name;
+ struct list_head list;
+ struct list_head region_inst;
+};
+
+/**
+ * struct knav_queue_inst: qmss queue instance properties
+ * @descs: descriptor pointer
+ * @desc_head, desc_tail, desc_count: descriptor counters
+ * @acc: accumulator channel pointer
+ * @kdev: qmss device pointer
+ * @range: range info
+ * @qmgr: queue manager info
+ * @id: queue instance id
+ * @irq_num: irq line number
+ * @notify_needed: notifier needed based on queue type
+ * @num_notifiers: total notifiers
+ * @handles: list head
+ * @name: queue instance name
+ * @irq_name: irq line name
+ */
+struct knav_queue_inst {
+ u32 *descs;
+ atomic_t desc_head, desc_tail, desc_count;
+ struct knav_acc_channel *acc;
+ struct knav_device *kdev;
+ struct knav_range_info *range;
+ struct knav_qmgr_info *qmgr;
+ u32 id;
+ int irq_num;
+ int notify_needed;
+ atomic_t num_notifiers;
+ struct list_head handles;
+ const char *name;
+ const char *irq_name;
+};
+
+/**
+ * struct knav_queue: qmss queue properties
+ * @reg_push, reg_pop, reg_peek: push, pop queue registers
+ * @inst: qmss queue instance properties
+ * @stats: queue statistics
+ * @notifier_fn: notifier function
+ * @notifier_fn_arg: notifier function argument
+ * @notifier_enabled: notifier enabled for a given queue
+ * @rcu: rcu head
+ * @flags: queue flags
+ * @list: list head
+ */
+struct knav_queue {
+ struct knav_reg_queue __iomem *reg_push, *reg_pop, *reg_peek;
+ struct knav_queue_inst *inst;
+ struct knav_queue_stats stats;
+ knav_queue_notify_fn notifier_fn;
+ void *notifier_fn_arg;
+ atomic_t notifier_enabled;
+ struct rcu_head rcu;
+ unsigned flags;
+ struct list_head list;
+};
+
+struct knav_device {
+ struct device *dev;
+ unsigned base_id;
+ unsigned num_queues;
+ unsigned num_queues_in_use;
+ unsigned inst_shift;
+ struct knav_link_ram_block link_rams[KNAV_NUM_LINKRAM];
+ void *instances;
+ struct list_head regions;
+ struct list_head queue_ranges;
+ struct list_head pools;
+ struct list_head pdsps;
+ struct list_head qmgrs;
+};
+
+struct knav_range_ops {
+ int (*init_range)(struct knav_range_info *range);
+ int (*free_range)(struct knav_range_info *range);
+ int (*init_queue)(struct knav_range_info *range,
+ struct knav_queue_inst *inst);
+ int (*open_queue)(struct knav_range_info *range,
+ struct knav_queue_inst *inst, unsigned flags);
+ int (*close_queue)(struct knav_range_info *range,
+ struct knav_queue_inst *inst);
+ int (*set_notify)(struct knav_range_info *range,
+ struct knav_queue_inst *inst, bool enabled);
+};
+
+struct knav_irq_info {
+ int irq;
+ u32 cpu_map;
+};
+
+struct knav_range_info {
+ const char *name;
+ struct knav_device *kdev;
+ unsigned queue_base;
+ unsigned num_queues;
+ void *queue_base_inst;
+ unsigned flags;
+ struct list_head list;
+ struct knav_range_ops *ops;
+ struct knav_acc_info acc_info;
+ struct knav_acc_channel *acc;
+ unsigned num_irqs;
+ struct knav_irq_info irqs[RANGE_MAX_IRQS];
+};
+
+#define RANGE_RESERVED BIT(0)
+#define RANGE_HAS_IRQ BIT(1)
+#define RANGE_HAS_ACCUMULATOR BIT(2)
+#define RANGE_MULTI_QUEUE BIT(3)
+
+#define for_each_region(kdev, region) \
+ list_for_each_entry(region, &kdev->regions, list)
+
+#define first_region(kdev) \
+ list_first_entry(&kdev->regions, \
+ struct knav_region, list)
+
+#define for_each_queue_range(kdev, range) \
+ list_for_each_entry(range, &kdev->queue_ranges, list)
+
+#define first_queue_range(kdev) \
+ list_first_entry(&kdev->queue_ranges, \
+ struct knav_range_info, list)
+
+#define for_each_pool(kdev, pool) \
+ list_for_each_entry(pool, &kdev->pools, list)
+
+#define for_each_pdsp(kdev, pdsp) \
+ list_for_each_entry(pdsp, &kdev->pdsps, list)
+
+#define for_each_qmgr(kdev, qmgr) \
+ list_for_each_entry(qmgr, &kdev->qmgrs, list)
+
+static inline struct knav_pdsp_info *
+knav_find_pdsp(struct knav_device *kdev, unsigned pdsp_id)
+{
+ struct knav_pdsp_info *pdsp;
+
+ for_each_pdsp(kdev, pdsp)
+ if (pdsp_id == pdsp->id)
+ return pdsp;
+ return NULL;
+}
+
+extern int knav_init_acc_range(struct knav_device *kdev,
+ struct device_node *node,
+ struct knav_range_info *range);
+extern void knav_queue_notify(struct knav_queue_inst *inst);
+
+#endif /* __KNAV_QMSS_H__ */
diff --git a/drivers/soc/ti/knav_qmss_acc.c b/drivers/soc/ti/knav_qmss_acc.c
new file mode 100644
index 0000000..6fbfde6e
--- /dev/null
+++ b/drivers/soc/ti/knav_qmss_acc.c
@@ -0,0 +1,591 @@
+/*
+ * Keystone accumulator queue manager
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Sandeep Nair <sandeep_n@ti.com>
+ * Cyril Chemparathy <cyril@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/soc/ti/knav_qmss.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/firmware.h>
+
+#include "knav_qmss.h"
+
+#define knav_range_offset_to_inst(kdev, range, q) \
+ (range->queue_base_inst + (q << kdev->inst_shift))
+
+static void __knav_acc_notify(struct knav_range_info *range,
+ struct knav_acc_channel *acc)
+{
+ struct knav_device *kdev = range->kdev;
+ struct knav_queue_inst *inst;
+ int range_base, queue;
+
+ range_base = kdev->base_id + range->queue_base;
+
+ if (range->flags & RANGE_MULTI_QUEUE) {
+ for (queue = 0; queue < range->num_queues; queue++) {
+ inst = knav_range_offset_to_inst(kdev, range,
+ queue);
+ if (inst->notify_needed) {
+ inst->notify_needed = 0;
+ dev_dbg(kdev->dev, "acc-irq: notifying %d\n",
+ range_base + queue);
+ knav_queue_notify(inst);
+ }
+ }
+ } else {
+ queue = acc->channel - range->acc_info.start_channel;
+ inst = knav_range_offset_to_inst(kdev, range, queue);
+ dev_dbg(kdev->dev, "acc-irq: notifying %d\n",
+ range_base + queue);
+ knav_queue_notify(inst);
+ }
+}
+
+static int knav_acc_set_notify(struct knav_range_info *range,
+ struct knav_queue_inst *kq,
+ bool enabled)
+{
+ struct knav_pdsp_info *pdsp = range->acc_info.pdsp;
+ struct knav_device *kdev = range->kdev;
+ u32 mask, offset;
+
+ /*
+ * when enabling, we need to re-trigger an interrupt if we
+ * have descriptors pending
+ */
+ if (!enabled || atomic_read(&kq->desc_count) <= 0)
+ return 0;
+
+ kq->notify_needed = 1;
+ atomic_inc(&kq->acc->retrigger_count);
+ mask = BIT(kq->acc->channel % 32);
+ offset = ACC_INTD_OFFSET_STATUS(kq->acc->channel);
+ dev_dbg(kdev->dev, "setup-notify: re-triggering irq for %s\n",
+ kq->acc->name);
+ writel_relaxed(mask, pdsp->intd + offset);
+ return 0;
+}
+
+static irqreturn_t knav_acc_int_handler(int irq, void *_instdata)
+{
+ struct knav_acc_channel *acc;
+ struct knav_queue_inst *kq = NULL;
+ struct knav_range_info *range;
+ struct knav_pdsp_info *pdsp;
+ struct knav_acc_info *info;
+ struct knav_device *kdev;
+
+ u32 *list, *list_cpu, val, idx, notifies;
+ int range_base, channel, queue = 0;
+ dma_addr_t list_dma;
+
+ range = _instdata;
+ info = &range->acc_info;
+ kdev = range->kdev;
+ pdsp = range->acc_info.pdsp;
+ acc = range->acc;
+
+ range_base = kdev->base_id + range->queue_base;
+ if ((range->flags & RANGE_MULTI_QUEUE) == 0) {
+ for (queue = 0; queue < range->num_irqs; queue++)
+ if (range->irqs[queue].irq == irq)
+ break;
+ kq = knav_range_offset_to_inst(kdev, range, queue);
+ acc += queue;
+ }
+
+ channel = acc->channel;
+ list_dma = acc->list_dma[acc->list_index];
+ list_cpu = acc->list_cpu[acc->list_index];
+ dev_dbg(kdev->dev, "acc-irq: channel %d, list %d, virt %p, phys %x\n",
+ channel, acc->list_index, list_cpu, list_dma);
+ if (atomic_read(&acc->retrigger_count)) {
+ atomic_dec(&acc->retrigger_count);
+ __knav_acc_notify(range, acc);
+ writel_relaxed(1, pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));
+ /* ack the interrupt */
+ writel_relaxed(ACC_CHANNEL_INT_BASE + channel,
+ pdsp->intd + ACC_INTD_OFFSET_EOI);
+
+ return IRQ_HANDLED;
+ }
+
+ notifies = readl_relaxed(pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));
+ WARN_ON(!notifies);
+ dma_sync_single_for_cpu(kdev->dev, list_dma, info->list_size,
+ DMA_FROM_DEVICE);
+
+ for (list = list_cpu; list < list_cpu + (info->list_size / sizeof(u32));
+ list += ACC_LIST_ENTRY_WORDS) {
+ if (ACC_LIST_ENTRY_WORDS == 1) {
+ dev_dbg(kdev->dev,
+ "acc-irq: list %d, entry @%p, %08x\n",
+ acc->list_index, list, list[0]);
+ } else if (ACC_LIST_ENTRY_WORDS == 2) {
+ dev_dbg(kdev->dev,
+ "acc-irq: list %d, entry @%p, %08x %08x\n",
+ acc->list_index, list, list[0], list[1]);
+ } else if (ACC_LIST_ENTRY_WORDS == 4) {
+ dev_dbg(kdev->dev,
+ "acc-irq: list %d, entry @%p, %08x %08x %08x %08x\n",
+ acc->list_index, list, list[0], list[1],
+ list[2], list[3]);
+ }
+
+ val = list[ACC_LIST_ENTRY_DESC_IDX];
+ if (!val)
+ break;
+
+ if (range->flags & RANGE_MULTI_QUEUE) {
+ queue = list[ACC_LIST_ENTRY_QUEUE_IDX] >> 16;
+ if (queue < range_base ||
+ queue >= range_base + range->num_queues) {
+ dev_err(kdev->dev,
+ "bad queue %d, expecting %d-%d\n",
+ queue, range_base,
+ range_base + range->num_queues);
+ break;
+ }
+ queue -= range_base;
+ kq = knav_range_offset_to_inst(kdev, range,
+ queue);
+ }
+
+ if (atomic_inc_return(&kq->desc_count) >= ACC_DESCS_MAX) {
+ atomic_dec(&kq->desc_count);
+ dev_err(kdev->dev,
+ "acc-irq: queue %d full, entry dropped\n",
+ queue + range_base);
+ continue;
+ }
+
+ idx = atomic_inc_return(&kq->desc_tail) & ACC_DESCS_MASK;
+ kq->descs[idx] = val;
+ kq->notify_needed = 1;
+ dev_dbg(kdev->dev, "acc-irq: enqueue %08x at %d, queue %d\n",
+ val, idx, queue + range_base);
+ }
+
+ __knav_acc_notify(range, acc);
+ memset(list_cpu, 0, info->list_size);
+ dma_sync_single_for_device(kdev->dev, list_dma, info->list_size,
+ DMA_TO_DEVICE);
+
+ /* flip to the other list */
+ acc->list_index ^= 1;
+
+ /* reset the interrupt counter */
+ writel_relaxed(1, pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));
+
+ /* ack the interrupt */
+ writel_relaxed(ACC_CHANNEL_INT_BASE + channel,
+ pdsp->intd + ACC_INTD_OFFSET_EOI);
+
+ return IRQ_HANDLED;
+}
+
+int knav_range_setup_acc_irq(struct knav_range_info *range,
+ int queue, bool enabled)
+{
+ struct knav_device *kdev = range->kdev;
+ struct knav_acc_channel *acc;
+ unsigned long cpu_map;
+ int ret = 0, irq;
+ u32 old, new;
+
+ if (range->flags & RANGE_MULTI_QUEUE) {
+ acc = range->acc;
+ irq = range->irqs[0].irq;
+ cpu_map = range->irqs[0].cpu_map;
+ } else {
+ acc = range->acc + queue;
+ irq = range->irqs[queue].irq;
+ cpu_map = range->irqs[queue].cpu_map;
+ }
+
+ old = acc->open_mask;
+ if (enabled)
+ new = old | BIT(queue);
+ else
+ new = old & ~BIT(queue);
+ acc->open_mask = new;
+
+ dev_dbg(kdev->dev,
+ "setup-acc-irq: open mask old %08x, new %08x, channel %s\n",
+ old, new, acc->name);
+
+ if (likely(new == old))
+ return 0;
+
+ if (new && !old) {
+ dev_dbg(kdev->dev,
+ "setup-acc-irq: requesting %s for channel %s\n",
+ acc->name, acc->name);
+ ret = request_irq(irq, knav_acc_int_handler, 0, acc->name,
+ range);
+ if (!ret && cpu_map) {
+ ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map));
+ if (ret) {
+ dev_warn(range->kdev->dev,
+ "Failed to set IRQ affinity\n");
+ return ret;
+ }
+ }
+ }
+
+ if (old && !new) {
+ dev_dbg(kdev->dev, "setup-acc-irq: freeing %s for channel %s\n",
+ acc->name, acc->name);
+ free_irq(irq, range);
+ }
+
+ return ret;
+}
+
+static const char *knav_acc_result_str(enum knav_acc_result result)
+{
+ static const char * const result_str[] = {
+ [ACC_RET_IDLE] = "idle",
+ [ACC_RET_SUCCESS] = "success",
+ [ACC_RET_INVALID_COMMAND] = "invalid command",
+ [ACC_RET_INVALID_CHANNEL] = "invalid channel",
+ [ACC_RET_INACTIVE_CHANNEL] = "inactive channel",
+ [ACC_RET_ACTIVE_CHANNEL] = "active channel",
+ [ACC_RET_INVALID_QUEUE] = "invalid queue",
+ [ACC_RET_INVALID_RET] = "invalid return code",
+ };
+
+ if (result >= ARRAY_SIZE(result_str))
+ return result_str[ACC_RET_INVALID_RET];
+ else
+ return result_str[result];
+}
+
+static enum knav_acc_result
+knav_acc_write(struct knav_device *kdev, struct knav_pdsp_info *pdsp,
+ struct knav_reg_acc_command *cmd)
+{
+ u32 result;
+
+ dev_dbg(kdev->dev, "acc command %08x %08x %08x %08x %08x\n",
+ cmd->command, cmd->queue_mask, cmd->list_phys,
+ cmd->queue_num, cmd->timer_config);
+
+ writel_relaxed(cmd->timer_config, &pdsp->acc_command->timer_config);
+ writel_relaxed(cmd->queue_num, &pdsp->acc_command->queue_num);
+ writel_relaxed(cmd->list_phys, &pdsp->acc_command->list_phys);
+ writel_relaxed(cmd->queue_mask, &pdsp->acc_command->queue_mask);
+ writel_relaxed(cmd->command, &pdsp->acc_command->command);
+
+ /* wait for the command to clear */
+ do {
+ result = readl_relaxed(&pdsp->acc_command->command);
+ } while ((result >> 8) & 0xff);
+
+ return (result >> 24) & 0xff;
+}
+
+static void knav_acc_setup_cmd(struct knav_device *kdev,
+ struct knav_range_info *range,
+ struct knav_reg_acc_command *cmd,
+ int queue)
+{
+ struct knav_acc_info *info = &range->acc_info;
+ struct knav_acc_channel *acc;
+ int queue_base;
+ u32 queue_mask;
+
+ if (range->flags & RANGE_MULTI_QUEUE) {
+ acc = range->acc;
+ queue_base = range->queue_base;
+ queue_mask = BIT(range->num_queues) - 1;
+ } else {
+ acc = range->acc + queue;
+ queue_base = range->queue_base + queue;
+ queue_mask = 0;
+ }
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->command = acc->channel;
+ cmd->queue_mask = queue_mask;
+ cmd->list_phys = acc->list_dma[0];
+ cmd->queue_num = info->list_entries << 16;
+ cmd->queue_num |= queue_base;
+
+ cmd->timer_config = ACC_LIST_ENTRY_TYPE << 18;
+ if (range->flags & RANGE_MULTI_QUEUE)
+ cmd->timer_config |= ACC_CFG_MULTI_QUEUE;
+ cmd->timer_config |= info->pacing_mode << 16;
+ cmd->timer_config |= info->timer_count;
+}
+
+static void knav_acc_stop(struct knav_device *kdev,
+ struct knav_range_info *range,
+ int queue)
+{
+ struct knav_reg_acc_command cmd;
+ struct knav_acc_channel *acc;
+ enum knav_acc_result result;
+
+ acc = range->acc + queue;
+
+ knav_acc_setup_cmd(kdev, range, &cmd, queue);
+ cmd.command |= ACC_CMD_DISABLE_CHANNEL << 8;
+ result = knav_acc_write(kdev, range->acc_info.pdsp, &cmd);
+
+ dev_dbg(kdev->dev, "stopped acc channel %s, result %s\n",
+ acc->name, knav_acc_result_str(result));
+}
+
+static enum knav_acc_result knav_acc_start(struct knav_device *kdev,
+ struct knav_range_info *range,
+ int queue)
+{
+ struct knav_reg_acc_command cmd;
+ struct knav_acc_channel *acc;
+ enum knav_acc_result result;
+
+ acc = range->acc + queue;
+
+ knav_acc_setup_cmd(kdev, range, &cmd, queue);
+ cmd.command |= ACC_CMD_ENABLE_CHANNEL << 8;
+ result = knav_acc_write(kdev, range->acc_info.pdsp, &cmd);
+
+ dev_dbg(kdev->dev, "started acc channel %s, result %s\n",
+ acc->name, knav_acc_result_str(result));
+
+ return result;
+}
+
+static int knav_acc_init_range(struct knav_range_info *range)
+{
+ struct knav_device *kdev = range->kdev;
+ struct knav_acc_channel *acc;
+ enum knav_acc_result result;
+ int queue;
+
+ for (queue = 0; queue < range->num_queues; queue++) {
+ acc = range->acc + queue;
+
+ knav_acc_stop(kdev, range, queue);
+ acc->list_index = 0;
+ result = knav_acc_start(kdev, range, queue);
+
+ if (result != ACC_RET_SUCCESS)
+ return -EIO;
+
+ if (range->flags & RANGE_MULTI_QUEUE)
+ return 0;
+ }
+ return 0;
+}
+
+static int knav_acc_init_queue(struct knav_range_info *range,
+ struct knav_queue_inst *kq)
+{
+ unsigned id = kq->id - range->queue_base;
+
+ kq->descs = devm_kzalloc(range->kdev->dev,
+ ACC_DESCS_MAX * sizeof(u32), GFP_KERNEL);
+ if (!kq->descs)
+ return -ENOMEM;
+
+ kq->acc = range->acc;
+ if ((range->flags & RANGE_MULTI_QUEUE) == 0)
+ kq->acc += id;
+ return 0;
+}
+
+static int knav_acc_open_queue(struct knav_range_info *range,
+ struct knav_queue_inst *inst, unsigned flags)
+{
+ unsigned id = inst->id - range->queue_base;
+
+ return knav_range_setup_acc_irq(range, id, true);
+}
+
+static int knav_acc_close_queue(struct knav_range_info *range,
+ struct knav_queue_inst *inst)
+{
+ unsigned id = inst->id - range->queue_base;
+
+ return knav_range_setup_acc_irq(range, id, false);
+}
+
+static int knav_acc_free_range(struct knav_range_info *range)
+{
+ struct knav_device *kdev = range->kdev;
+ struct knav_acc_channel *acc;
+ struct knav_acc_info *info;
+ int channel, channels;
+
+ info = &range->acc_info;
+
+ if (range->flags & RANGE_MULTI_QUEUE)
+ channels = 1;
+ else
+ channels = range->num_queues;
+
+ for (channel = 0; channel < channels; channel++) {
+ acc = range->acc + channel;
+ if (!acc->list_cpu[0])
+ continue;
+ dma_unmap_single(kdev->dev, acc->list_dma[0],
+ info->mem_size, DMA_BIDIRECTIONAL);
+ free_pages_exact(acc->list_cpu[0], info->mem_size);
+ }
+ devm_kfree(range->kdev->dev, range->acc);
+ return 0;
+}
+
+struct knav_range_ops knav_acc_range_ops = {
+ .set_notify = knav_acc_set_notify,
+ .init_queue = knav_acc_init_queue,
+ .open_queue = knav_acc_open_queue,
+ .close_queue = knav_acc_close_queue,
+ .init_range = knav_acc_init_range,
+ .free_range = knav_acc_free_range,
+};
+
+/**
+ * knav_init_acc_range: Initialise accumulator ranges
+ *
+ * @kdev: qmss device
+ * @node: device node
+ * @range: qmss range information
+ *
+ * Returns 0 on success or a negative error code otherwise.
+ */
+int knav_init_acc_range(struct knav_device *kdev,
+ struct device_node *node,
+ struct knav_range_info *range)
+{
+ struct knav_acc_channel *acc;
+ struct knav_pdsp_info *pdsp;
+ struct knav_acc_info *info;
+ int ret, channel, channels;
+ int list_size, mem_size;
+ dma_addr_t list_dma;
+ void *list_mem;
+ u32 config[5];
+
+ range->flags |= RANGE_HAS_ACCUMULATOR;
+ info = &range->acc_info;
+
+ ret = of_property_read_u32_array(node, "accumulator", config, 5);
+ if (ret)
+ return ret;
+
+ info->pdsp_id = config[0];
+ info->start_channel = config[1];
+ info->list_entries = config[2];
+ info->pacing_mode = config[3];
+ info->timer_count = config[4] / ACC_DEFAULT_PERIOD;
+
+ if (info->start_channel > ACC_MAX_CHANNEL) {
+ dev_err(kdev->dev, "channel %d invalid for range %s\n",
+ info->start_channel, range->name);
+ return -EINVAL;
+ }
+
+ if (info->pacing_mode > 3) {
+ dev_err(kdev->dev, "pacing mode %d invalid for range %s\n",
+ info->pacing_mode, range->name);
+ return -EINVAL;
+ }
+
+ pdsp = knav_find_pdsp(kdev, info->pdsp_id);
+ if (!pdsp) {
+ dev_err(kdev->dev, "pdsp id %d not found for range %s\n",
+ info->pdsp_id, range->name);
+ return -EINVAL;
+ }
+
+ info->pdsp = pdsp;
+ channels = range->num_queues;
+ if (of_get_property(node, "multi-queue", NULL)) {
+ range->flags |= RANGE_MULTI_QUEUE;
+ channels = 1;
+ if (range->queue_base & (32 - 1)) {
+ dev_err(kdev->dev,
+ "misaligned multi-queue accumulator range %s\n",
+ range->name);
+ return -EINVAL;
+ }
+ if (range->num_queues > 32) {
+ dev_err(kdev->dev,
+ "too many queues in accumulator range %s\n",
+ range->name);
+ return -EINVAL;
+ }
+ }
+
+ /* figure out list size */
+ list_size = info->list_entries;
+ list_size *= ACC_LIST_ENTRY_WORDS * sizeof(u32);
+ info->list_size = list_size;
+ mem_size = PAGE_ALIGN(list_size * 2);
+ info->mem_size = mem_size;
+ range->acc = devm_kzalloc(kdev->dev, channels * sizeof(*range->acc),
+ GFP_KERNEL);
+ if (!range->acc)
+ return -ENOMEM;
+
+ for (channel = 0; channel < channels; channel++) {
+ acc = range->acc + channel;
+ acc->channel = info->start_channel + channel;
+
+ /* allocate memory for the two lists */
+ list_mem = alloc_pages_exact(mem_size, GFP_KERNEL | GFP_DMA);
+ if (!list_mem)
+ return -ENOMEM;
+
+ list_dma = dma_map_single(kdev->dev, list_mem, mem_size,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(kdev->dev, list_dma)) {
+ free_pages_exact(list_mem, mem_size);
+ return -ENOMEM;
+ }
+
+ memset(list_mem, 0, mem_size);
+ dma_sync_single_for_device(kdev->dev, list_dma, mem_size,
+ DMA_TO_DEVICE);
+ scnprintf(acc->name, sizeof(acc->name), "hwqueue-acc-%d",
+ acc->channel);
+ acc->list_cpu[0] = list_mem;
+ acc->list_cpu[1] = list_mem + list_size;
+ acc->list_dma[0] = list_dma;
+ acc->list_dma[1] = list_dma + list_size;
+ dev_dbg(kdev->dev, "%s: channel %d, phys %08x, virt %8p\n",
+ acc->name, acc->channel, list_dma, list_mem);
+ }
+
+ range->ops = &knav_acc_range_ops;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(knav_init_acc_range);
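+
+/*
+ * Illustrative sketch, not part of the driver: a hypothetical queue-range
+ * node carrying the "accumulator" property in the layout parsed above, i.e.
+ * <pdsp-id start-channel list-entries pacing-mode timer-period-in-usecs>,
+ * plus the optional "multi-queue" flag. Node name and values are examples
+ * only.
+ *
+ *	acc-low-0 {
+ *		qrange = <480 32>;
+ *		accumulator = <0 47 264 3 50>;
+ *		multi-queue;
+ *	};
+ */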
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
new file mode 100644
index 0000000..0a2c863
--- /dev/null
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -0,0 +1,1816 @@
+/*
+ * Keystone Queue Manager subsystem driver
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ * Authors: Sandeep Nair <sandeep_n@ti.com>
+ * Cyril Chemparathy <cyril@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/pm_runtime.h>
+#include <linux/firmware.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/soc/ti/knav_qmss.h>
+
+#include "knav_qmss.h"
+
+static struct knav_device *kdev;
+static DEFINE_MUTEX(knav_dev_lock);
+
+/* Queue manager register indices in DTS */
+#define KNAV_QUEUE_PEEK_REG_INDEX 0
+#define KNAV_QUEUE_STATUS_REG_INDEX 1
+#define KNAV_QUEUE_CONFIG_REG_INDEX 2
+#define KNAV_QUEUE_REGION_REG_INDEX 3
+#define KNAV_QUEUE_PUSH_REG_INDEX 4
+#define KNAV_QUEUE_POP_REG_INDEX 5
+
+/* PDSP register indices in DTS */
+#define KNAV_QUEUE_PDSP_IRAM_REG_INDEX 0
+#define KNAV_QUEUE_PDSP_REGS_REG_INDEX 1
+#define KNAV_QUEUE_PDSP_INTD_REG_INDEX 2
+#define KNAV_QUEUE_PDSP_CMD_REG_INDEX 3
+
+#define knav_queue_idx_to_inst(kdev, idx) \
+ (kdev->instances + (idx << kdev->inst_shift))
+
+#define for_each_handle_rcu(qh, inst) \
+ list_for_each_entry_rcu(qh, &inst->handles, list)
+
+#define for_each_instance(idx, inst, kdev) \
+ for (idx = 0, inst = kdev->instances; \
+ idx < (kdev)->num_queues_in_use; \
+ idx++, inst = knav_queue_idx_to_inst(kdev, idx))
+
+/**
+ * knav_queue_notify: qmss queue notifier call
+ *
+ * @inst: qmss queue instance (e.g. an accumulator queue)
+ */
+void knav_queue_notify(struct knav_queue_inst *inst)
+{
+ struct knav_queue *qh;
+
+ if (!inst)
+ return;
+
+ rcu_read_lock();
+ for_each_handle_rcu(qh, inst) {
+ if (atomic_read(&qh->notifier_enabled) <= 0)
+ continue;
+ if (WARN_ON(!qh->notifier_fn))
+ continue;
+ atomic_inc(&qh->stats.notifies);
+ qh->notifier_fn(qh->notifier_fn_arg);
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(knav_queue_notify);
+
+static irqreturn_t knav_queue_int_handler(int irq, void *_instdata)
+{
+ struct knav_queue_inst *inst = _instdata;
+
+ knav_queue_notify(inst);
+ return IRQ_HANDLED;
+}
+
+static int knav_queue_setup_irq(struct knav_range_info *range,
+ struct knav_queue_inst *inst)
+{
+ unsigned queue = inst->id - range->queue_base;
+ unsigned long cpu_map;
+ int ret = 0, irq;
+
+ if (range->flags & RANGE_HAS_IRQ) {
+ irq = range->irqs[queue].irq;
+ cpu_map = range->irqs[queue].cpu_map;
+ ret = request_irq(irq, knav_queue_int_handler, 0,
+ inst->irq_name, inst);
+ if (ret)
+ return ret;
+ disable_irq(irq);
+ if (cpu_map) {
+ ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map));
+ if (ret) {
+ dev_warn(range->kdev->dev,
+ "Failed to set IRQ affinity\n");
+ return ret;
+ }
+ }
+ }
+ return ret;
+}
+
+static void knav_queue_free_irq(struct knav_queue_inst *inst)
+{
+ struct knav_range_info *range = inst->range;
+ unsigned queue = inst->id - inst->range->queue_base;
+ int irq;
+
+ if (range->flags & RANGE_HAS_IRQ) {
+ irq = range->irqs[queue].irq;
+ irq_set_affinity_hint(irq, NULL);
+ free_irq(irq, inst);
+ }
+}
+
+static inline bool knav_queue_is_busy(struct knav_queue_inst *inst)
+{
+ return !list_empty(&inst->handles);
+}
+
+static inline bool knav_queue_is_reserved(struct knav_queue_inst *inst)
+{
+ return inst->range->flags & RANGE_RESERVED;
+}
+
+static inline bool knav_queue_is_shared(struct knav_queue_inst *inst)
+{
+ struct knav_queue *tmp;
+
+ rcu_read_lock();
+ for_each_handle_rcu(tmp, inst) {
+ if (tmp->flags & KNAV_QUEUE_SHARED) {
+ rcu_read_unlock();
+ return true;
+ }
+ }
+ rcu_read_unlock();
+ return false;
+}
+
+static inline bool knav_queue_match_type(struct knav_queue_inst *inst,
+ unsigned type)
+{
+ if ((type == KNAV_QUEUE_QPEND) &&
+ (inst->range->flags & RANGE_HAS_IRQ)) {
+ return true;
+ } else if ((type == KNAV_QUEUE_ACC) &&
+ (inst->range->flags & RANGE_HAS_ACCUMULATOR)) {
+ return true;
+ } else if ((type == KNAV_QUEUE_GP) &&
+ !(inst->range->flags &
+ (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) {
+ return true;
+ }
+ return false;
+}
+
+static inline struct knav_queue_inst *
+knav_queue_match_id_to_inst(struct knav_device *kdev, unsigned id)
+{
+ struct knav_queue_inst *inst;
+ int idx;
+
+ for_each_instance(idx, inst, kdev) {
+ if (inst->id == id)
+ return inst;
+ }
+ return NULL;
+}
+
+static inline struct knav_queue_inst *knav_queue_find_by_id(int id)
+{
+ if (kdev->base_id <= id &&
+ kdev->base_id + kdev->num_queues > id) {
+ id -= kdev->base_id;
+ return knav_queue_match_id_to_inst(kdev, id);
+ }
+ return NULL;
+}
+
+static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
+ const char *name, unsigned flags)
+{
+ struct knav_queue *qh;
+ unsigned id;
+ int ret = 0;
+
+ qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL);
+ if (!qh)
+ return ERR_PTR(-ENOMEM);
+
+ qh->flags = flags;
+ qh->inst = inst;
+ id = inst->id - inst->qmgr->start_queue;
+ qh->reg_push = &inst->qmgr->reg_push[id];
+ qh->reg_pop = &inst->qmgr->reg_pop[id];
+ qh->reg_peek = &inst->qmgr->reg_peek[id];
+
+ /* first opener? */
+ if (!knav_queue_is_busy(inst)) {
+ struct knav_range_info *range = inst->range;
+
+ inst->name = kstrndup(name, KNAV_NAME_SIZE, GFP_KERNEL);
+ if (range->ops && range->ops->open_queue)
+ ret = range->ops->open_queue(range, inst, flags);
+
+ if (ret) {
+ devm_kfree(inst->kdev->dev, qh);
+ return ERR_PTR(ret);
+ }
+ }
+ list_add_tail_rcu(&qh->list, &inst->handles);
+ return qh;
+}
+
+static struct knav_queue *
+knav_queue_open_by_id(const char *name, unsigned id, unsigned flags)
+{
+ struct knav_queue_inst *inst;
+ struct knav_queue *qh;
+
+ mutex_lock(&knav_dev_lock);
+
+ qh = ERR_PTR(-ENODEV);
+ inst = knav_queue_find_by_id(id);
+ if (!inst)
+ goto unlock_ret;
+
+ qh = ERR_PTR(-EEXIST);
+ if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst))
+ goto unlock_ret;
+
+ qh = ERR_PTR(-EBUSY);
+ if ((flags & KNAV_QUEUE_SHARED) &&
+ (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst)))
+ goto unlock_ret;
+
+ qh = __knav_queue_open(inst, name, flags);
+
+unlock_ret:
+ mutex_unlock(&knav_dev_lock);
+
+ return qh;
+}
+
+static struct knav_queue *knav_queue_open_by_type(const char *name,
+ unsigned type, unsigned flags)
+{
+ struct knav_queue_inst *inst;
+ struct knav_queue *qh = ERR_PTR(-EINVAL);
+ int idx;
+
+ mutex_lock(&knav_dev_lock);
+
+ for_each_instance(idx, inst, kdev) {
+ if (knav_queue_is_reserved(inst))
+ continue;
+ if (!knav_queue_match_type(inst, type))
+ continue;
+ if (knav_queue_is_busy(inst))
+ continue;
+ qh = __knav_queue_open(inst, name, flags);
+ goto unlock_ret;
+ }
+
+unlock_ret:
+ mutex_unlock(&knav_dev_lock);
+ return qh;
+}
+
+static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled)
+{
+ struct knav_range_info *range = inst->range;
+
+ if (range->ops && range->ops->set_notify)
+ range->ops->set_notify(range, inst, enabled);
+}
+
+static int knav_queue_enable_notifier(struct knav_queue *qh)
+{
+ struct knav_queue_inst *inst = qh->inst;
+ bool first;
+
+ if (WARN_ON(!qh->notifier_fn))
+ return -EINVAL;
+
+ /* Adjust the per handle notifier count */
+ first = (atomic_inc_return(&qh->notifier_enabled) == 1);
+ if (!first)
+ return 0; /* nothing to do */
+
+ /* Now adjust the per instance notifier count */
+ first = (atomic_inc_return(&inst->num_notifiers) == 1);
+ if (first)
+ knav_queue_set_notify(inst, true);
+
+ return 0;
+}
+
+static int knav_queue_disable_notifier(struct knav_queue *qh)
+{
+ struct knav_queue_inst *inst = qh->inst;
+ bool last;
+
+ last = (atomic_dec_return(&qh->notifier_enabled) == 0);
+ if (!last)
+ return 0; /* nothing to do */
+
+ last = (atomic_dec_return(&inst->num_notifiers) == 0);
+ if (last)
+ knav_queue_set_notify(inst, false);
+
+ return 0;
+}
+
+static int knav_queue_set_notifier(struct knav_queue *qh,
+ struct knav_queue_notify_config *cfg)
+{
+ knav_queue_notify_fn old_fn = qh->notifier_fn;
+
+ if (!cfg)
+ return -EINVAL;
+
+ if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ)))
+ return -ENOTSUPP;
+
+ if (!cfg->fn && old_fn)
+ knav_queue_disable_notifier(qh);
+
+ qh->notifier_fn = cfg->fn;
+ qh->notifier_fn_arg = cfg->fn_arg;
+
+ if (cfg->fn && !old_fn)
+ knav_queue_enable_notifier(qh);
+
+ return 0;
+}
+
+static int knav_gp_set_notify(struct knav_range_info *range,
+ struct knav_queue_inst *inst,
+ bool enabled)
+{
+ unsigned queue;
+
+ if (range->flags & RANGE_HAS_IRQ) {
+ queue = inst->id - range->queue_base;
+ if (enabled)
+ enable_irq(range->irqs[queue].irq);
+ else
+ disable_irq_nosync(range->irqs[queue].irq);
+ }
+ return 0;
+}
+
+static int knav_gp_open_queue(struct knav_range_info *range,
+ struct knav_queue_inst *inst, unsigned flags)
+{
+ return knav_queue_setup_irq(range, inst);
+}
+
+static int knav_gp_close_queue(struct knav_range_info *range,
+ struct knav_queue_inst *inst)
+{
+ knav_queue_free_irq(inst);
+ return 0;
+}
+
+struct knav_range_ops knav_gp_range_ops = {
+ .set_notify = knav_gp_set_notify,
+ .open_queue = knav_gp_open_queue,
+ .close_queue = knav_gp_close_queue,
+};
+
+
+static int knav_queue_get_count(void *qhandle)
+{
+ struct knav_queue *qh = qhandle;
+ struct knav_queue_inst *inst = qh->inst;
+
+ return readl_relaxed(&qh->reg_peek[0].entry_count) +
+ atomic_read(&inst->desc_count);
+}
+
+static void knav_queue_debug_show_instance(struct seq_file *s,
+ struct knav_queue_inst *inst)
+{
+ struct knav_device *kdev = inst->kdev;
+ struct knav_queue *qh;
+
+ if (!knav_queue_is_busy(inst))
+ return;
+
+ seq_printf(s, "\tqueue id %d (%s)\n",
+ kdev->base_id + inst->id, inst->name);
+ for_each_handle_rcu(qh, inst) {
+ seq_printf(s, "\t\thandle %p: ", qh);
+ seq_printf(s, "pushes %8d, ",
+ atomic_read(&qh->stats.pushes));
+ seq_printf(s, "pops %8d, ",
+ atomic_read(&qh->stats.pops));
+ seq_printf(s, "count %8d, ",
+ knav_queue_get_count(qh));
+ seq_printf(s, "notifies %8d, ",
+ atomic_read(&qh->stats.notifies));
+ seq_printf(s, "push errors %8d, ",
+ atomic_read(&qh->stats.push_errors));
+ seq_printf(s, "pop errors %8d\n",
+ atomic_read(&qh->stats.pop_errors));
+ }
+}
+
+static int knav_queue_debug_show(struct seq_file *s, void *v)
+{
+ struct knav_queue_inst *inst;
+ int idx;
+
+ mutex_lock(&knav_dev_lock);
+ seq_printf(s, "%s: %u-%u\n",
+ dev_name(kdev->dev), kdev->base_id,
+ kdev->base_id + kdev->num_queues - 1);
+ for_each_instance(idx, inst, kdev)
+ knav_queue_debug_show_instance(s, inst);
+ mutex_unlock(&knav_dev_lock);
+
+ return 0;
+}
+
+static int knav_queue_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, knav_queue_debug_show, NULL);
+}
+
+static const struct file_operations knav_queue_debug_ops = {
+ .open = knav_queue_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static inline int knav_queue_pdsp_wait(u32 * __iomem addr, unsigned timeout,
+ u32 flags)
+{
+ unsigned long end;
+ u32 val = 0;
+
+ end = jiffies + msecs_to_jiffies(timeout);
+ while (time_after(end, jiffies)) {
+ val = readl_relaxed(addr);
+ if (flags)
+ val &= flags;
+ if (!val)
+ break;
+ cpu_relax();
+ }
+ return val ? -ETIMEDOUT : 0;
+}
+
+
+static int knav_queue_flush(struct knav_queue *qh)
+{
+ struct knav_queue_inst *inst = qh->inst;
+ unsigned id = inst->id - inst->qmgr->start_queue;
+
+ atomic_set(&inst->desc_count, 0);
+ writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh);
+ return 0;
+}
+
+/**
+ * knav_queue_open() - open a hardware queue
+ * @name - name to give the queue handle
+ * @id - desired queue number if any or specifies the type
+ * of queue
+ * @flags - the following flags are applicable to queues:
+ * KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
+ * exclusive by default.
+ * Subsequent attempts to open a shared queue should
+ * also have this flag.
+ *
+ * Returns a handle to the open hardware queue if successful. Use IS_ERR()
+ * to check the returned value for error codes.
+ */
+void *knav_queue_open(const char *name, unsigned id,
+ unsigned flags)
+{
+ struct knav_queue *qh = ERR_PTR(-EINVAL);
+
+ switch (id) {
+ case KNAV_QUEUE_QPEND:
+ case KNAV_QUEUE_ACC:
+ case KNAV_QUEUE_GP:
+ qh = knav_queue_open_by_type(name, id, flags);
+ break;
+
+ default:
+ qh = knav_queue_open_by_id(name, id, flags);
+ break;
+ }
+ return qh;
+}
+EXPORT_SYMBOL_GPL(knav_queue_open);
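+
+/*
+ * Illustrative sketch, not part of the driver: opening a general purpose
+ * queue by type and a specific queue by id. The names and the queue id 1000
+ * are hypothetical.
+ *
+ *	void *gp_q, *tx_compl_q;
+ *
+ *	gp_q = knav_queue_open("my-gp-queue", KNAV_QUEUE_GP, 0);
+ *	if (IS_ERR(gp_q))
+ *		return PTR_ERR(gp_q);
+ *
+ *	tx_compl_q = knav_queue_open("my-tx-compl", 1000, KNAV_QUEUE_SHARED);
+ *	if (IS_ERR(tx_compl_q)) {
+ *		knav_queue_close(gp_q);
+ *		return PTR_ERR(tx_compl_q);
+ *	}
+ */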
+
+/**
+ * knav_queue_close() - close a hardware queue handle
+ * @qh - handle to close
+ */
+void knav_queue_close(void *qhandle)
+{
+ struct knav_queue *qh = qhandle;
+ struct knav_queue_inst *inst = qh->inst;
+
+ while (atomic_read(&qh->notifier_enabled) > 0)
+ knav_queue_disable_notifier(qh);
+
+ mutex_lock(&knav_dev_lock);
+ list_del_rcu(&qh->list);
+ mutex_unlock(&knav_dev_lock);
+ synchronize_rcu();
+ if (!knav_queue_is_busy(inst)) {
+ struct knav_range_info *range = inst->range;
+
+ if (range->ops && range->ops->close_queue)
+ range->ops->close_queue(range, inst);
+ }
+ devm_kfree(inst->kdev->dev, qh);
+}
+EXPORT_SYMBOL_GPL(knav_queue_close);
+
+/**
+ * knav_queue_device_control() - Perform control operations on a queue
+ * @qh - queue handle
+ * @cmd - control commands
+ * @arg - command argument
+ *
+ * Returns 0 on success, errno otherwise.
+ */
+int knav_queue_device_control(void *qhandle, enum knav_queue_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct knav_queue *qh = qhandle;
+ struct knav_queue_notify_config *cfg;
+ int ret;
+
+ switch ((int)cmd) {
+ case KNAV_QUEUE_GET_ID:
+ ret = qh->inst->kdev->base_id + qh->inst->id;
+ break;
+
+ case KNAV_QUEUE_FLUSH:
+ ret = knav_queue_flush(qh);
+ break;
+
+ case KNAV_QUEUE_SET_NOTIFIER:
+ cfg = (void *)arg;
+ ret = knav_queue_set_notifier(qh, cfg);
+ break;
+
+ case KNAV_QUEUE_ENABLE_NOTIFY:
+ ret = knav_queue_enable_notifier(qh);
+ break;
+
+ case KNAV_QUEUE_DISABLE_NOTIFY:
+ ret = knav_queue_disable_notifier(qh);
+ break;
+
+ case KNAV_QUEUE_GET_COUNT:
+ ret = knav_queue_get_count(qh);
+ break;
+
+ default:
+ ret = -ENOTSUPP;
+ break;
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(knav_queue_device_control);
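+
+/*
+ * Illustrative sketch, not part of the driver: installing a notifier on an
+ * open queue handle with KNAV_QUEUE_SET_NOTIFIER. The callback and the
+ * my_ctx argument are hypothetical; the config layout follows
+ * struct knav_queue_notify_config.
+ *
+ *	static void my_queue_notify(void *arg)
+ *	{
+ *		... schedule work to drain the queue ...
+ *	}
+ *
+ *	struct knav_queue_notify_config cfg = {
+ *		.fn	= my_queue_notify,
+ *		.fn_arg	= my_ctx,
+ *	};
+ *
+ *	ret = knav_queue_device_control(qh, KNAV_QUEUE_SET_NOTIFIER,
+ *					(unsigned long)&cfg);
+ */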
+
+
+
+/**
+ * knav_queue_push() - push data (or descriptor) to the tail of a queue
+ * @qh - hardware queue handle
+ * @data - data to push
+ * @size - size of data to push
+ * @flags - can be used to pass additional information
+ *
+ * Returns 0 on success, errno otherwise.
+ */
+int knav_queue_push(void *qhandle, dma_addr_t dma,
+ unsigned size, unsigned flags)
+{
+ struct knav_queue *qh = qhandle;
+ u32 val;
+
+ val = (u32)dma | ((size / 16) - 1);
+ writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh);
+
+ atomic_inc(&qh->stats.pushes);
+ return 0;
+}
+
+/**
+ * knav_queue_pop() - pop data (or descriptor) from the head of a queue
+ * @qh - hardware queue handle
+ * @size - (optional) size of the data popped.
+ *
+ * Returns a DMA address on success, 0 on failure.
+ */
+dma_addr_t knav_queue_pop(void *qhandle, unsigned *size)
+{
+ struct knav_queue *qh = qhandle;
+ struct knav_queue_inst *inst = qh->inst;
+ dma_addr_t dma;
+ u32 val, idx;
+
+ /* is this an accumulator-backed queue? */
+ if (inst->descs) {
+ if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) {
+ atomic_inc(&inst->desc_count);
+ return 0;
+ }
+ idx = atomic_inc_return(&inst->desc_head);
+ idx &= ACC_DESCS_MASK;
+ val = inst->descs[idx];
+ } else {
+ val = readl_relaxed(&qh->reg_pop[0].ptr_size_thresh);
+ if (unlikely(!val))
+ return 0;
+ }
+
+ dma = val & DESC_PTR_MASK;
+ if (size)
+ *size = ((val & DESC_SIZE_MASK) + 1) * 16;
+
+ atomic_inc(&qh->stats.pops);
+ return dma;
+}
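+
+/*
+ * Illustrative sketch, not part of the driver: pushing a previously mapped
+ * descriptor and popping it back. Here dma/dma_sz are assumed to come from
+ * an earlier knav_pool_desc_map() call and qh from knav_queue_open().
+ *
+ *	unsigned pop_size;
+ *	dma_addr_t popped;
+ *
+ *	knav_queue_push(qh, dma, dma_sz, 0);
+ *
+ *	popped = knav_queue_pop(qh, &pop_size);
+ *	if (!popped)
+ *		return;		(the queue was empty)
+ */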
+
+/* carve out descriptors and push into queue */
+static void kdesc_fill_pool(struct knav_pool *pool)
+{
+ struct knav_region *region;
+ int i;
+
+ region = pool->region;
+ pool->desc_size = region->desc_size;
+ for (i = 0; i < pool->num_desc; i++) {
+ int index = pool->region_offset + i;
+ dma_addr_t dma_addr;
+ unsigned dma_size;
+ dma_addr = region->dma_start + (region->desc_size * index);
+ dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);
+ dma_sync_single_for_device(pool->dev, dma_addr, dma_size,
+ DMA_TO_DEVICE);
+ knav_queue_push(pool->queue, dma_addr, dma_size, 0);
+ }
+}
+
+/* pop out descriptors and close the queue */
+static void kdesc_empty_pool(struct knav_pool *pool)
+{
+ dma_addr_t dma;
+ unsigned size;
+ void *desc;
+ int i;
+
+ if (!pool->queue)
+ return;
+
+ for (i = 0;; i++) {
+ dma = knav_queue_pop(pool->queue, &size);
+ if (!dma)
+ break;
+ desc = knav_pool_desc_dma_to_virt(pool, dma);
+ if (!desc) {
+ dev_dbg(pool->kdev->dev,
+ "couldn't unmap desc, continuing\n");
+ continue;
+ }
+ }
+ WARN_ON(i != pool->num_desc);
+ knav_queue_close(pool->queue);
+}
+
+
+/* Get the DMA address of a descriptor */
+dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt)
+{
+ struct knav_pool *pool = ph;
+ return pool->region->dma_start + (virt - pool->region->virt_start);
+}
+
+void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma)
+{
+ struct knav_pool *pool = ph;
+ return pool->region->virt_start + (dma - pool->region->dma_start);
+}
+
+/**
+ * knav_pool_create() - Create a pool of descriptors
+ * @name - name to give the pool handle
+ * @num_desc - number of descriptors in the pool
+ * @region_id - QMSS region id from which the descriptors are to be
+ * allocated.
+ *
+ * Returns a pool handle on success.
+ * Use IS_ERR_OR_NULL() to identify error values on return.
+ */
+void *knav_pool_create(const char *name,
+ int num_desc, int region_id)
+{
+ struct knav_region *reg_itr, *region = NULL;
+ struct knav_pool *pool, *pi;
+ struct list_head *node;
+ unsigned last_offset;
+ bool slot_found;
+ int ret;
+
+ if (!kdev->dev)
+ return ERR_PTR(-ENODEV);
+
+ pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
+ if (!pool) {
+ dev_err(kdev->dev, "out of memory allocating pool\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for_each_region(kdev, reg_itr) {
+ if (reg_itr->id != region_id)
+ continue;
+ region = reg_itr;
+ break;
+ }
+
+ if (!region) {
+ dev_err(kdev->dev, "region-id(%d) not found\n", region_id);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
+ if (IS_ERR_OR_NULL(pool->queue)) {
+ dev_err(kdev->dev,
+ "failed to open queue for pool(%s), error %ld\n",
+ name, PTR_ERR(pool->queue));
+ ret = PTR_ERR(pool->queue);
+ goto err;
+ }
+
+ pool->name = kstrndup(name, KNAV_NAME_SIZE, GFP_KERNEL);
+ pool->kdev = kdev;
+ pool->dev = kdev->dev;
+
+ mutex_lock(&knav_dev_lock);
+
+ if (num_desc > (region->num_desc - region->used_desc)) {
+ dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n",
+ region_id, name);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* The region maintains a sorted (by region offset) list of pools;
+ * use the first free slot that is large enough to accommodate
+ * the request.
+ */
+ last_offset = 0;
+ slot_found = false;
+ node = &region->pools;
+ list_for_each_entry(pi, &region->pools, region_inst) {
+ if ((pi->region_offset - last_offset) >= num_desc) {
+ slot_found = true;
+ break;
+ }
+ last_offset = pi->region_offset + pi->num_desc;
+ }
+ node = &pi->region_inst;
+
+ if (slot_found) {
+ pool->region = region;
+ pool->num_desc = num_desc;
+ pool->region_offset = last_offset;
+ region->used_desc += num_desc;
+ list_add_tail(&pool->list, &kdev->pools);
+ list_add_tail(&pool->region_inst, node);
+ } else {
+ dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n",
+ name, region_id);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ mutex_unlock(&knav_dev_lock);
+ kdesc_fill_pool(pool);
+ return pool;
+
+err:
+ mutex_unlock(&knav_dev_lock);
+ kfree(pool->name);
+ devm_kfree(kdev->dev, pool);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(knav_pool_create);
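+
+/*
+ * Illustrative sketch, not part of the driver: carving a descriptor pool
+ * out of a QMSS region and cycling one descriptor through it. The pool
+ * name, descriptor count and region id 12 are hypothetical.
+ *
+ *	void *pool, *desc;
+ *
+ *	pool = knav_pool_create("my-tx-pool", 1024, 12);
+ *	if (IS_ERR_OR_NULL(pool))
+ *		return PTR_ERR(pool);
+ *
+ *	desc = knav_pool_desc_get(pool);
+ *	if (IS_ERR(desc)) {
+ *		knav_pool_destroy(pool);
+ *		return PTR_ERR(desc);
+ *	}
+ *
+ *	knav_pool_desc_put(pool, desc);
+ *	knav_pool_destroy(pool);
+ */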
+
+/**
+ * knav_pool_destroy() - Free a pool of descriptors
+ * @pool - pool handle
+ */
+void knav_pool_destroy(void *ph)
+{
+ struct knav_pool *pool = ph;
+
+ if (!pool)
+ return;
+
+ if (!pool->region)
+ return;
+
+ kdesc_empty_pool(pool);
+ mutex_lock(&knav_dev_lock);
+
+ pool->region->used_desc -= pool->num_desc;
+ list_del(&pool->region_inst);
+ list_del(&pool->list);
+
+ mutex_unlock(&knav_dev_lock);
+ kfree(pool->name);
+ devm_kfree(kdev->dev, pool);
+}
+EXPORT_SYMBOL_GPL(knav_pool_destroy);
+
+
+/**
+ * knav_pool_desc_get() - Get a descriptor from the pool
+ * @pool - pool handle
+ *
+ * Returns a descriptor from the pool.
+ */
+void *knav_pool_desc_get(void *ph)
+{
+ struct knav_pool *pool = ph;
+ dma_addr_t dma;
+ unsigned size;
+ void *data;
+
+ dma = knav_queue_pop(pool->queue, &size);
+ if (unlikely(!dma))
+ return ERR_PTR(-ENOMEM);
+ data = knav_pool_desc_dma_to_virt(pool, dma);
+ return data;
+}
+
+/**
+ * knav_pool_desc_put() - return a descriptor to the pool
+ * @pool - pool handle
+ */
+void knav_pool_desc_put(void *ph, void *desc)
+{
+ struct knav_pool *pool = ph;
+ dma_addr_t dma;
+ dma = knav_pool_desc_virt_to_dma(pool, desc);
+ knav_queue_push(pool->queue, dma, pool->region->desc_size, 0);
+}
+
+/**
+ * knav_pool_desc_map() - Map descriptor for DMA transfer
+ * @pool - pool handle
+ * @desc - address of descriptor to map
+ * @size - size of descriptor to map
+ * @dma - DMA address return pointer
+ * @dma_sz - return pointer for the adjusted (cache-aligned) size
+ *
+ * Returns 0 on success, errno otherwise.
+ */
+int knav_pool_desc_map(void *ph, void *desc, unsigned size,
+ dma_addr_t *dma, unsigned *dma_sz)
+{
+ struct knav_pool *pool = ph;
+ *dma = knav_pool_desc_virt_to_dma(pool, desc);
+ size = min(size, pool->region->desc_size);
+ size = ALIGN(size, SMP_CACHE_BYTES);
+ *dma_sz = size;
+ dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE);
+
+ /* Ensure the descriptor reaches memory */
+ __iowmb();
+
+ return 0;
+}
+
+/**
+ * knav_pool_desc_unmap() - Unmap descriptor after DMA transfer
+ * @pool - pool handle
+ * @dma - DMA address of descriptor to unmap
+ * @dma_sz - size of descriptor to unmap
+ *
+ * Returns the descriptor address on success. Use IS_ERR_OR_NULL() to
+ * identify error values on return.
+ */
+void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz)
+{
+ struct knav_pool *pool = ph;
+ unsigned desc_sz;
+ void *desc;
+
+ desc_sz = min(dma_sz, pool->region->desc_size);
+ desc = knav_pool_desc_dma_to_virt(pool, dma);
+ dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE);
+ prefetch(desc);
+ return desc;
+}
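+
+/*
+ * Illustrative sketch, not part of the driver: preparing a descriptor for
+ * the hardware and recovering it after completion. The struct my_desc type
+ * and the tx/completion queue handles are hypothetical.
+ *
+ *	struct my_desc *desc = knav_pool_desc_get(pool);
+ *	dma_addr_t dma;
+ *	unsigned dma_sz;
+ *
+ *	(fill in *desc, then hand it to the hardware)
+ *	knav_pool_desc_map(pool, desc, sizeof(*desc), &dma, &dma_sz);
+ *	knav_queue_push(tx_queue, dma, dma_sz, 0);
+ *
+ *	(on completion, translate the popped address back to a descriptor)
+ *	dma = knav_queue_pop(compl_queue, &dma_sz);
+ *	desc = knav_pool_desc_unmap(pool, dma, dma_sz);
+ */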
+
+/**
+ * knav_pool_count() - Get the number of descriptors in pool.
+ * @pool - pool handle
+ * Returns number of elements in the pool.
+ */
+int knav_pool_count(void *ph)
+{
+ struct knav_pool *pool = ph;
+ return knav_queue_get_count(pool->queue);
+}
+
+static void knav_queue_setup_region(struct knav_device *kdev,
+ struct knav_region *region)
+{
+ unsigned hw_num_desc, hw_desc_size, size;
+ struct knav_reg_region __iomem *regs;
+ struct knav_qmgr_info *qmgr;
+ struct knav_pool *pool;
+ int id = region->id;
+ struct page *page;
+
+ /* unused region? */
+ if (!region->num_desc) {
+ dev_warn(kdev->dev, "unused region %s\n", region->name);
+ return;
+ }
+
+ /* get hardware descriptor value */
+ hw_num_desc = ilog2(region->num_desc - 1) + 1;
+
+ /* did we force fit ourselves into nothingness? */
+ if (region->num_desc < 32) {
+ region->num_desc = 0;
+ dev_warn(kdev->dev, "too few descriptors in region %s\n",
+ region->name);
+ return;
+ }
+
+ size = region->num_desc * region->desc_size;
+ region->virt_start = alloc_pages_exact(size, GFP_KERNEL | GFP_DMA |
+ GFP_DMA32);
+ if (!region->virt_start) {
+ region->num_desc = 0;
+ dev_err(kdev->dev, "memory alloc failed for region %s\n",
+ region->name);
+ return;
+ }
+ region->virt_end = region->virt_start + size;
+ page = virt_to_page(region->virt_start);
+
+ region->dma_start = dma_map_page(kdev->dev, page, 0, size,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(kdev->dev, region->dma_start)) {
+ dev_err(kdev->dev, "dma map failed for region %s\n",
+ region->name);
+ goto fail;
+ }
+ region->dma_end = region->dma_start + size;
+
+ pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
+ if (!pool) {
+ dev_err(kdev->dev, "out of memory allocating dummy pool\n");
+ goto fail;
+ }
+ pool->num_desc = 0;
+ pool->region_offset = region->num_desc;
+ list_add(&pool->region_inst, &region->pools);
+
+ dev_dbg(kdev->dev,
+ "region %s (%d): size:%d, link:%d@%d, phys:%08x-%08x, virt:%p-%p\n",
+ region->name, id, region->desc_size, region->num_desc,
+ region->link_index, region->dma_start, region->dma_end,
+ region->virt_start, region->virt_end);
+
+ hw_desc_size = (region->desc_size / 16) - 1;
+ hw_num_desc -= 5;
+
+ for_each_qmgr(kdev, qmgr) {
+ regs = qmgr->reg_region + id;
+ writel_relaxed(region->dma_start, &regs->base);
+ writel_relaxed(region->link_index, &regs->start_index);
+ writel_relaxed(hw_desc_size << 16 | hw_num_desc,
+ &regs->size_count);
+ }
+ return;
+
+fail:
+ if (region->dma_start)
+ dma_unmap_page(kdev->dev, region->dma_start, size,
+ DMA_BIDIRECTIONAL);
+ if (region->virt_start)
+ free_pages_exact(region->virt_start, size);
+ region->num_desc = 0;
+ return;
+}
+
+static const char *knav_queue_find_name(struct device_node *node)
+{
+ const char *name;
+
+ if (of_property_read_string(node, "label", &name) < 0)
+ name = node->name;
+ if (!name)
+ name = "unknown";
+ return name;
+}
+
+static int knav_queue_setup_regions(struct knav_device *kdev,
+ struct device_node *regions)
+{
+ struct device *dev = kdev->dev;
+ struct knav_region *region;
+ struct device_node *child;
+ u32 temp[2];
+ int ret;
+
+ for_each_child_of_node(regions, child) {
+ region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
+ if (!region) {
+ dev_err(dev, "out of memory allocating region\n");
+ return -ENOMEM;
+ }
+
+ region->name = knav_queue_find_name(child);
+ of_property_read_u32(child, "id", ®ion->id);
+ ret = of_property_read_u32_array(child, "region-spec", temp, 2);
+ if (!ret) {
+ region->num_desc = temp[0];
+ region->desc_size = temp[1];
+ } else {
+ dev_err(dev, "invalid region info %s\n", region->name);
+ devm_kfree(dev, region);
+ continue;
+ }
+
+ if (!of_get_property(child, "link-index", NULL)) {
+ dev_err(dev, "No link info for %s\n", region->name);
+ devm_kfree(dev, region);
+ continue;
+ }
+ ret = of_property_read_u32(child, "link-index",
+ &region->link_index);
+ if (ret) {
+ dev_err(dev, "link index not found for %s\n",
+ region->name);
+ devm_kfree(dev, region);
+ continue;
+ }
+
+ INIT_LIST_HEAD(&region->pools);
+ list_add_tail(&region->list, &kdev->regions);
+ }
+ if (list_empty(&kdev->regions)) {
+ dev_err(dev, "no valid region information found\n");
+ return -ENODEV;
+ }
+
+ /* Next, we run through the regions and set things up */
+ for_each_region(kdev, region)
+ knav_queue_setup_region(kdev, region);
+
+ return 0;
+}
+
+static int knav_get_link_ram(struct knav_device *kdev,
+ const char *name,
+ struct knav_link_ram_block *block)
+{
+ struct platform_device *pdev = to_platform_device(kdev->dev);
+ struct device_node *node = pdev->dev.of_node;
+ u32 temp[2];
+
+ /*
+ * Note: link ram resources are specified in "entry" sized units. In
+ * reality, although entries are ~40 bits in hardware, we treat them as
+ * 64-bit entities here.
+ *
+ * For example, to specify the internal link ram for Keystone-I class
+ * devices, we would set the linkram0 resource to 0x80000-0x83fff.
+ *
+ * This gets a bit weird when other link rams are used. For example,
+ * if the range specified is 0x0c000000-0x0c003fff (i.e., 16K entries
+ * in MSMC SRAM), the actual memory used is 0x0c000000-0x0c020000,
+ * which accounts for 64-bits per entry, for 16K entries.
+ */
+ if (!of_property_read_u32_array(node, name, temp, 2)) {
+ if (temp[0]) {
+ /*
+ * queue_base specified => using internal or on-chip
+ * link ram. WARNING: we do not "reserve" this block.
+ */
+ block->phys = (dma_addr_t)temp[0];
+ block->virt = NULL;
+ block->size = temp[1];
+ } else {
+ block->size = temp[1];
+ /* queue_base not specified => allocate requested size */
+ block->virt = dmam_alloc_coherent(kdev->dev,
+ 8 * block->size, &block->phys,
+ GFP_KERNEL);
+ if (!block->virt) {
+ dev_err(kdev->dev, "failed to alloc linkram\n");
+ return -ENOMEM;
+ }
+ }
+ } else {
+ return -ENODEV;
+ }
+ return 0;
+}
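The entry-to-byte arithmetic described in the comment above is easy to get wrong; a standalone sketch (not driver code) of the sizing for the MSMC SRAM example, assuming 8 bytes of backing store per link-ram entry:

	#include <stdio.h>

	int main(void)
	{
		unsigned long base    = 0x0c000000;  /* start of the MSMC SRAM block     */
		unsigned long entries = 0x4000;      /* 16K entries, as given in the DT  */
		unsigned long bytes   = entries * 8; /* 64 bits of storage per entry     */

		/* prints 0x0c000000-0x0c020000 (128 KiB), matching the comment above */
		printf("link ram spans 0x%08lx-0x%08lx (%lu KiB)\n",
		       base, base + bytes, bytes / 1024);
		return 0;
	}
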
+
+static int knav_queue_setup_link_ram(struct knav_device *kdev)
+{
+ struct knav_link_ram_block *block;
+ struct knav_qmgr_info *qmgr;
+
+ for_each_qmgr(kdev, qmgr) {
+ block = &kdev->link_rams[0];
+ dev_dbg(kdev->dev, "linkram0: phys:%x, virt:%p, size:%x\n",
+ block->phys, block->virt, block->size);
+ writel_relaxed(block->phys, &qmgr->reg_config->link_ram_base0);
+ writel_relaxed(block->size, &qmgr->reg_config->link_ram_size0);
+
+ block++;
+ if (!block->size)
+ return 0;
+
+ dev_dbg(kdev->dev, "linkram1: phys:%x, virt:%p, size:%x\n",
+ block->phys, block->virt, block->size);
+ writel_relaxed(block->phys, &qmgr->reg_config->link_ram_base1);
+ }
+
+ return 0;
+}
+
+static int knav_setup_queue_range(struct knav_device *kdev,
+ struct device_node *node)
+{
+ struct device *dev = kdev->dev;
+ struct knav_range_info *range;
+ struct knav_qmgr_info *qmgr;
+ u32 temp[2], start, end, id, index;
+ int ret, i;
+
+ range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
+ if (!range) {
+ dev_err(dev, "out of memory allocating range\n");
+ return -ENOMEM;
+ }
+
+ range->kdev = kdev;
+ range->name = knav_queue_find_name(node);
+ ret = of_property_read_u32_array(node, "qrange", temp, 2);
+ if (!ret) {
+ range->queue_base = temp[0] - kdev->base_id;
+ range->num_queues = temp[1];
+ } else {
+ dev_err(dev, "invalid queue range %s\n", range->name);
+ devm_kfree(dev, range);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < RANGE_MAX_IRQS; i++) {
+ struct of_phandle_args oirq;
+
+ if (of_irq_parse_one(node, i, &oirq))
+ break;
+
+ range->irqs[i].irq = irq_create_of_mapping(&oirq);
+ if (range->irqs[i].irq == IRQ_NONE)
+ break;
+
+ range->num_irqs++;
+
+ if (oirq.args_count == 3)
+ range->irqs[i].cpu_map =
+ (oirq.args[2] & 0x0000ff00) >> 8;
+ }
+
+ range->num_irqs = min(range->num_irqs, range->num_queues);
+ if (range->num_irqs)
+ range->flags |= RANGE_HAS_IRQ;
+
+ if (of_get_property(node, "qalloc-by-id", NULL))
+ range->flags |= RANGE_RESERVED;
+
+ if (of_get_property(node, "accumulator", NULL)) {
+ ret = knav_init_acc_range(kdev, node, range);
+ if (ret < 0) {
+ devm_kfree(dev, range);
+ return ret;
+ }
+ } else {
+ range->ops = &knav_gp_range_ops;
+ }
+
+ /* set threshold to 1, and flush out the queues */
+ for_each_qmgr(kdev, qmgr) {
+ start = max(qmgr->start_queue, range->queue_base);
+ end = min(qmgr->start_queue + qmgr->num_queues,
+ range->queue_base + range->num_queues);
+ for (id = start; id < end; id++) {
+ index = id - qmgr->start_queue;
+ writel_relaxed(THRESH_GTE | 1,
+ &qmgr->reg_peek[index].ptr_size_thresh);
+ writel_relaxed(0,
+ &qmgr->reg_push[index].ptr_size_thresh);
+ }
+ }
+
+ list_add_tail(&range->list, &kdev->queue_ranges);
+ dev_dbg(dev, "added range %s: %d-%d, %d irqs%s%s%s\n",
+ range->name, range->queue_base,
+ range->queue_base + range->num_queues - 1,
+ range->num_irqs,
+ (range->flags & RANGE_HAS_IRQ) ? ", has irq" : "",
+ (range->flags & RANGE_RESERVED) ? ", reserved" : "",
+ (range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : "");
+ kdev->num_queues_in_use += range->num_queues;
+ return 0;
+}
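The IRQ loop above treats bits 15:8 of the third interrupt-specifier cell as a CPU map for the queue interrupt. A standalone illustration of that extraction (the specifier value below is made up):

	#include <stdio.h>

	int main(void)
	{
		unsigned int cell2 = 0x00000304;                  /* hypothetical third cell         */
		unsigned int cpu_map = (cell2 & 0x0000ff00) >> 8; /* -> 0x03, i.e. CPUs 0 and 1      */

		printf("cpu_map: 0x%02x\n", cpu_map);
		return 0;
	}
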
+
+static int knav_setup_queue_pools(struct knav_device *kdev,
+ struct device_node *queue_pools)
+{
+ struct device_node *type, *range;
+ int ret;
+
+ for_each_child_of_node(queue_pools, type) {
+ for_each_child_of_node(type, range) {
+ ret = knav_setup_queue_range(kdev, range);
+ /* return value ignored, we init the rest... */
+ }
+ }
+
+ /* ... and barf if they all failed! */
+ if (list_empty(&kdev->queue_ranges)) {
+ dev_err(kdev->dev, "no valid queue range found\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static void knav_free_queue_range(struct knav_device *kdev,
+ struct knav_range_info *range)
+{
+ if (range->ops && range->ops->free_range)
+ range->ops->free_range(range);
+ list_del(&range->list);
+ devm_kfree(kdev->dev, range);
+}
+
+static void knav_free_queue_ranges(struct knav_device *kdev)
+{
+ struct knav_range_info *range;
+
+ for (;;) {
+ range = first_queue_range(kdev);
+ if (!range)
+ break;
+ knav_free_queue_range(kdev, range);
+ }
+}
+
+static void knav_queue_free_regions(struct knav_device *kdev)
+{
+ struct knav_region *region;
+ struct knav_pool *pool;
+ unsigned size;
+
+ for (;;) {
+ region = first_region(kdev);
+ if (!region)
+ break;
+ list_for_each_entry(pool, &region->pools, region_inst)
+ knav_pool_destroy(pool);
+
+ size = region->virt_end - region->virt_start;
+ if (size)
+ free_pages_exact(region->virt_start, size);
+ list_del(&region->list);
+ devm_kfree(kdev->dev, region);
+ }
+}
+
+static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
+ struct device_node *node, int index)
+{
+ struct resource res;
+ void __iomem *regs;
+ int ret;
+
+ ret = of_address_to_resource(node, index, &res);
+ if (ret) {
+ dev_err(kdev->dev, "Can't translate of node(%s) address for index(%d)\n",
+ node->name, index);
+ return ERR_PTR(ret);
+ }
+
+ regs = devm_ioremap_resource(kdev->dev, &res);
+ if (IS_ERR(regs))
+ dev_err(kdev->dev, "Failed to map register base for index(%d) node(%s)\n",
+ index, node->name);
+ return regs;
+}
+
+static int knav_queue_init_qmgrs(struct knav_device *kdev,
+ struct device_node *qmgrs)
+{
+ struct device *dev = kdev->dev;
+ struct knav_qmgr_info *qmgr;
+ struct device_node *child;
+ u32 temp[2];
+ int ret;
+
+ for_each_child_of_node(qmgrs, child) {
+ qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL);
+ if (!qmgr) {
+ dev_err(dev, "out of memory allocating qmgr\n");
+ return -ENOMEM;
+ }
+
+ ret = of_property_read_u32_array(child, "managed-queues",
+ temp, 2);
+ if (!ret) {
+ qmgr->start_queue = temp[0];
+ qmgr->num_queues = temp[1];
+ } else {
+ dev_err(dev, "invalid qmgr queue range\n");
+ devm_kfree(dev, qmgr);
+ continue;
+ }
+
+ dev_info(dev, "qmgr start queue %d, number of queues %d\n",
+ qmgr->start_queue, qmgr->num_queues);
+
+ qmgr->reg_peek =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_PEEK_REG_INDEX);
+ qmgr->reg_status =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_STATUS_REG_INDEX);
+ qmgr->reg_config =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_CONFIG_REG_INDEX);
+ qmgr->reg_region =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_REGION_REG_INDEX);
+ qmgr->reg_push =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_PUSH_REG_INDEX);
+ qmgr->reg_pop =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_POP_REG_INDEX);
+
+ if (IS_ERR(qmgr->reg_peek) || IS_ERR(qmgr->reg_status) ||
+ IS_ERR(qmgr->reg_config) || IS_ERR(qmgr->reg_region) ||
+ IS_ERR(qmgr->reg_push) || IS_ERR(qmgr->reg_pop)) {
+ dev_err(dev, "failed to map qmgr regs\n");
+ if (!IS_ERR(qmgr->reg_peek))
+ devm_iounmap(dev, qmgr->reg_peek);
+ if (!IS_ERR(qmgr->reg_status))
+ devm_iounmap(dev, qmgr->reg_status);
+ if (!IS_ERR(qmgr->reg_config))
+ devm_iounmap(dev, qmgr->reg_config);
+ if (!IS_ERR(qmgr->reg_region))
+ devm_iounmap(dev, qmgr->reg_region);
+ if (!IS_ERR(qmgr->reg_push))
+ devm_iounmap(dev, qmgr->reg_push);
+ if (!IS_ERR(qmgr->reg_pop))
+ devm_iounmap(dev, qmgr->reg_pop);
+ devm_kfree(dev, qmgr);
+ continue;
+ }
+
+ list_add_tail(&qmgr->list, &kdev->qmgrs);
+ dev_info(dev, "added qmgr start queue %d, num of queues %d, reg_peek %p, reg_status %p, reg_config %p, reg_region %p, reg_push %p, reg_pop %p\n",
+ qmgr->start_queue, qmgr->num_queues,
+ qmgr->reg_peek, qmgr->reg_status,
+ qmgr->reg_config, qmgr->reg_region,
+ qmgr->reg_push, qmgr->reg_pop);
+ }
+ return 0;
+}
+
+static int knav_queue_init_pdsps(struct knav_device *kdev,
+ struct device_node *pdsps)
+{
+ struct device *dev = kdev->dev;
+ struct knav_pdsp_info *pdsp;
+ struct device_node *child;
+ int ret;
+
+ for_each_child_of_node(pdsps, child) {
+ pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL);
+ if (!pdsp) {
+ dev_err(dev, "out of memory allocating pdsp\n");
+ return -ENOMEM;
+ }
+ pdsp->name = knav_queue_find_name(child);
+ ret = of_property_read_string(child, "firmware",
+ &pdsp->firmware);
+ if (ret < 0 || !pdsp->firmware) {
+ dev_err(dev, "unknown firmware for pdsp %s\n",
+ pdsp->name);
+ devm_kfree(dev, pdsp);
+ continue;
+ }
+ dev_dbg(dev, "pdsp name %s fw name :%s\n", pdsp->name,
+ pdsp->firmware);
+
+ pdsp->iram =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_PDSP_IRAM_REG_INDEX);
+ pdsp->regs =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_PDSP_REGS_REG_INDEX);
+ pdsp->intd =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_PDSP_INTD_REG_INDEX);
+ pdsp->command =
+ knav_queue_map_reg(kdev, child,
+ KNAV_QUEUE_PDSP_CMD_REG_INDEX);
+
+ if (IS_ERR(pdsp->command) || IS_ERR(pdsp->iram) ||
+ IS_ERR(pdsp->regs) || IS_ERR(pdsp->intd)) {
+ dev_err(dev, "failed to map pdsp %s regs\n",
+ pdsp->name);
+ if (!IS_ERR(pdsp->command))
+ devm_iounmap(dev, pdsp->command);
+ if (!IS_ERR(pdsp->iram))
+ devm_iounmap(dev, pdsp->iram);
+ if (!IS_ERR(pdsp->regs))
+ devm_iounmap(dev, pdsp->regs);
+ if (!IS_ERR(pdsp->intd))
+ devm_iounmap(dev, pdsp->intd);
+ devm_kfree(dev, pdsp);
+ continue;
+ }
+ of_property_read_u32(child, "id", &pdsp->id);
+ list_add_tail(&pdsp->list, &kdev->pdsps);
+ dev_dbg(dev, "added pdsp %s: command %p, iram %p, regs %p, intd %p, firmware %s\n",
+ pdsp->name, pdsp->command, pdsp->iram, pdsp->regs,
+ pdsp->intd, pdsp->firmware);
+ }
+ return 0;
+}
+
+static int knav_queue_stop_pdsp(struct knav_device *kdev,
+ struct knav_pdsp_info *pdsp)
+{
+ u32 val, timeout = 1000;
+ int ret;
+
+ val = readl_relaxed(&pdsp->regs->control) & ~PDSP_CTRL_ENABLE;
+ writel_relaxed(val, &pdsp->regs->control);
+ ret = knav_queue_pdsp_wait(&pdsp->regs->control, timeout,
+ PDSP_CTRL_RUNNING);
+ if (ret < 0) {
+ dev_err(kdev->dev, "timed out on pdsp %s stop\n", pdsp->name);
+ return ret;
+ }
+ return 0;
+}
+
+static int knav_queue_load_pdsp(struct knav_device *kdev,
+ struct knav_pdsp_info *pdsp)
+{
+ int i, ret, fwlen;
+ const struct firmware *fw;
+ u32 *fwdata;
+
+ ret = request_firmware(&fw, pdsp->firmware, kdev->dev);
+ if (ret) {
+ dev_err(kdev->dev, "failed to get firmware %s for pdsp %s\n",
+ pdsp->firmware, pdsp->name);
+ return ret;
+ }
+ writel_relaxed(pdsp->id + 1, pdsp->command + 0x18);
+ /* download the firmware */
+ fwdata = (u32 *)fw->data;
+ fwlen = (fw->size + sizeof(u32) - 1) / sizeof(u32);
+ for (i = 0; i < fwlen; i++)
+ writel_relaxed(be32_to_cpu(fwdata[i]), pdsp->iram + i);
+
+ release_firmware(fw);
+ return 0;
+}
+
+static int knav_queue_start_pdsp(struct knav_device *kdev,
+ struct knav_pdsp_info *pdsp)
+{
+ u32 val, timeout = 1000;
+ int ret;
+
+ /* write a command for sync */
+ writel_relaxed(0xffffffff, pdsp->command);
+ while (readl_relaxed(pdsp->command) != 0xffffffff)
+ cpu_relax();
+
+ /* soft reset the PDSP */
+ val = readl_relaxed(&pdsp->regs->control);
+ val &= ~(PDSP_CTRL_PC_MASK | PDSP_CTRL_SOFT_RESET);
+ writel_relaxed(val, &pdsp->regs->control);
+
+ /* enable pdsp */
+ val = readl_relaxed(&pdsp->regs->control) | PDSP_CTRL_ENABLE;
+ writel_relaxed(val, &pdsp->regs->control);
+
+ /* wait for command register to clear */
+ ret = knav_queue_pdsp_wait(pdsp->command, timeout, 0);
+ if (ret < 0) {
+ dev_err(kdev->dev,
+ "timed out on pdsp %s command register wait\n",
+ pdsp->name);
+ return ret;
+ }
+ return 0;
+}
+
+static void knav_queue_stop_pdsps(struct knav_device *kdev)
+{
+ struct knav_pdsp_info *pdsp;
+
+ /* disable all pdsps */
+ for_each_pdsp(kdev, pdsp)
+ knav_queue_stop_pdsp(kdev, pdsp);
+}
+
+static int knav_queue_start_pdsps(struct knav_device *kdev)
+{
+ struct knav_pdsp_info *pdsp;
+ int ret;
+
+ knav_queue_stop_pdsps(kdev);
+ /* now load them all */
+ for_each_pdsp(kdev, pdsp) {
+ ret = knav_queue_load_pdsp(kdev, pdsp);
+ if (ret < 0)
+ return ret;
+ }
+
+ for_each_pdsp(kdev, pdsp) {
+ ret = knav_queue_start_pdsp(kdev, pdsp);
+ WARN_ON(ret);
+ }
+ return 0;
+}
+
+static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id)
+{
+ struct knav_qmgr_info *qmgr;
+
+ for_each_qmgr(kdev, qmgr) {
+ if ((id >= qmgr->start_queue) &&
+ (id < qmgr->start_queue + qmgr->num_queues))
+ return qmgr;
+ }
+ return NULL;
+}
+
+static int knav_queue_init_queue(struct knav_device *kdev,
+ struct knav_range_info *range,
+ struct knav_queue_inst *inst,
+ unsigned id)
+{
+ char irq_name[KNAV_NAME_SIZE];
+ inst->qmgr = knav_find_qmgr(id);
+ if (!inst->qmgr)
+ return -1;
+
+ INIT_LIST_HEAD(&inst->handles);
+ inst->kdev = kdev;
+ inst->range = range;
+ inst->irq_num = -1;
+ inst->id = id;
+ scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id);
+ inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL);
+
+ if (range->ops && range->ops->init_queue)
+ return range->ops->init_queue(range, inst);
+ else
+ return 0;
+}
+
+static int knav_queue_init_queues(struct knav_device *kdev)
+{
+ struct knav_range_info *range;
+ int size, id, base_idx;
+ int idx = 0, ret = 0;
+
+ /* how much do we need for instance data? */
+ size = sizeof(struct knav_queue_inst);
+
+ /*
+ * round this up to a power of 2 to keep the index-to-instance
+ * arithmetic fast
+ */
+ kdev->inst_shift = order_base_2(size);
+ size = (1 << kdev->inst_shift) * kdev->num_queues_in_use;
+ kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL);
+ if (!kdev->instances)
+ return -1;
+
+ for_each_queue_range(kdev, range) {
+ if (range->ops && range->ops->init_range)
+ range->ops->init_range(range);
+ base_idx = idx;
+ for (id = range->queue_base;
+ id < range->queue_base + range->num_queues; id++, idx++) {
+ ret = knav_queue_init_queue(kdev, range,
+ knav_queue_idx_to_inst(kdev, idx), id);
+ if (ret < 0)
+ return ret;
+ }
+ range->queue_base_inst =
+ knav_queue_idx_to_inst(kdev, base_idx);
+ }
+ return 0;
+}
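The power-of-two rounding above exists so that mapping a queue index to its instance is a shift rather than a multiply. A minimal sketch of what knav_queue_idx_to_inst presumably reduces to (the real helper lives elsewhere in this driver, so the exact shape here is an assumption):

	#include <linux/types.h>

	/* sketch: 'instances' is the array allocated above and 'inst_shift'
	 * is order_base_2(sizeof(struct knav_queue_inst)) */
	static inline void *idx_to_inst(void *instances, unsigned int inst_shift,
					unsigned int idx)
	{
		return (char *)instances + ((size_t)idx << inst_shift);
	}
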
+
+static int knav_queue_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *qmgrs, *queue_pools, *regions, *pdsps;
+ struct device *dev = &pdev->dev;
+ u32 temp[2];
+ int ret;
+
+ if (!node) {
+ dev_err(dev, "device tree info unavailable\n");
+ return -ENODEV;
+ }
+
+ kdev = devm_kzalloc(dev, sizeof(struct knav_device), GFP_KERNEL);
+ if (!kdev) {
+ dev_err(dev, "memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, kdev);
+ kdev->dev = dev;
+ INIT_LIST_HEAD(&kdev->queue_ranges);
+ INIT_LIST_HEAD(&kdev->qmgrs);
+ INIT_LIST_HEAD(&kdev->pools);
+ INIT_LIST_HEAD(&kdev->regions);
+ INIT_LIST_HEAD(&kdev->pdsps);
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable QMSS\n");
+ return ret;
+ }
+
+ if (of_property_read_u32_array(node, "queue-range", temp, 2)) {
+ dev_err(dev, "queue-range not specified\n");
+ ret = -ENODEV;
+ goto err;
+ }
+ kdev->base_id = temp[0];
+ kdev->num_queues = temp[1];
+
+ /* Initialize queue managers using device tree configuration */
+ qmgrs = of_get_child_by_name(node, "qmgrs");
+ if (!qmgrs) {
+ dev_err(dev, "queue manager info not specified\n");
+ ret = -ENODEV;
+ goto err;
+ }
+ ret = knav_queue_init_qmgrs(kdev, qmgrs);
+ of_node_put(qmgrs);
+ if (ret)
+ goto err;
+
+ /* get pdsp configuration values from device tree */
+ pdsps = of_get_child_by_name(node, "pdsps");
+ if (pdsps) {
+ ret = knav_queue_init_pdsps(kdev, pdsps);
+ if (ret)
+ goto err;
+
+ ret = knav_queue_start_pdsps(kdev);
+ if (ret)
+ goto err;
+ }
+ of_node_put(pdsps);
+
+ /* get usable queue range values from device tree */
+ queue_pools = of_get_child_by_name(node, "queue-pools");
+ if (!queue_pools) {
+ dev_err(dev, "queue-pools not specified\n");
+ ret = -ENODEV;
+ goto err;
+ }
+ ret = knav_setup_queue_pools(kdev, queue_pools);
+ of_node_put(queue_pools);
+ if (ret)
+ goto err;
+
+ ret = knav_get_link_ram(kdev, "linkram0", &kdev->link_rams[0]);
+ if (ret) {
+ dev_err(kdev->dev, "could not setup linking ram\n");
+ goto err;
+ }
+
+ ret = knav_get_link_ram(kdev, "linkram1", &kdev->link_rams[1]);
+ if (ret) {
+ /*
+ * nothing really, we have one linking ram already, so we just
+ * live within our means
+ */
+ }
+
+ ret = knav_queue_setup_link_ram(kdev);
+ if (ret)
+ goto err;
+
+ regions = of_get_child_by_name(node, "descriptor-regions");
+ if (!regions) {
+ dev_err(dev, "descriptor-regions not specified\n");
+ ret = -ENODEV;
+ goto err;
+ }
+ ret = knav_queue_setup_regions(kdev, regions);
+ of_node_put(regions);
+ if (ret)
+ goto err;
+
+ ret = knav_queue_init_queues(kdev);
+ if (ret < 0) {
+ dev_err(dev, "hwqueue initialization failed\n");
+ goto err;
+ }
+
+ debugfs_create_file("qmss", S_IFREG | S_IRUGO, NULL, NULL,
+ &knav_queue_debug_ops);
+ return 0;
+
+err:
+ knav_queue_stop_pdsps(kdev);
+ knav_queue_free_regions(kdev);
+ knav_free_queue_ranges(kdev);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static int knav_queue_remove(struct platform_device *pdev)
+{
+ /* TODO: Free resources */
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+/* Match table for of_platform binding */
+static struct of_device_id keystone_qmss_of_match[] = {
+ { .compatible = "ti,keystone-navigator-qmss", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, keystone_qmss_of_match);
+
+static struct platform_driver keystone_qmss_driver = {
+ .probe = knav_queue_probe,
+ .remove = knav_queue_remove,
+ .driver = {
+ .name = "keystone-navigator-qmss",
+ .owner = THIS_MODULE,
+ .of_match_table = keystone_qmss_of_match,
+ },
+};
+module_platform_driver(keystone_qmss_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI QMSS driver for Keystone SOCs");
+MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
+MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");
diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h
new file mode 100644
index 0000000..dad035c
--- /dev/null
+++ b/include/linux/soc/ti/knav_dma.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2014 Texas Instruments Incorporated
+ * Authors: Sandeep Nair <sandeep_n@ti.com>
+ *          Cyril Chemparathy <cyril@ti.com>
+ *          Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__
+#define __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__
+
+/*
+ * PKTDMA descriptor manipulation macros for host packet descriptor
+ */
+#define MASK(x) (BIT(x) - 1)
+#define KNAV_DMA_DESC_PKT_LEN_MASK MASK(22)
+#define KNAV_DMA_DESC_PKT_LEN_SHIFT 0
+#define KNAV_DMA_DESC_PS_INFO_IN_SOP BIT(22)
+#define KNAV_DMA_DESC_PS_INFO_IN_DESC 0
+#define KNAV_DMA_DESC_TAG_MASK MASK(8)
+#define KNAV_DMA_DESC_SAG_HI_SHIFT 24
+#define KNAV_DMA_DESC_STAG_LO_SHIFT 16
+#define KNAV_DMA_DESC_DTAG_HI_SHIFT 8
+#define KNAV_DMA_DESC_DTAG_LO_SHIFT 0
+#define KNAV_DMA_DESC_HAS_EPIB BIT(31)
+#define KNAV_DMA_DESC_NO_EPIB 0
+#define KNAV_DMA_DESC_PSLEN_SHIFT 24
+#define KNAV_DMA_DESC_PSLEN_MASK MASK(6)
+#define KNAV_DMA_DESC_ERR_FLAG_SHIFT 20
+#define KNAV_DMA_DESC_ERR_FLAG_MASK MASK(4)
+#define KNAV_DMA_DESC_PSFLAG_SHIFT 16
+#define KNAV_DMA_DESC_PSFLAG_MASK MASK(4)
+#define KNAV_DMA_DESC_RETQ_SHIFT 0
+#define KNAV_DMA_DESC_RETQ_MASK MASK(14)
+#define KNAV_DMA_DESC_BUF_LEN_MASK MASK(22)
+
+#define KNAV_DMA_NUM_EPIB_WORDS 4
+#define KNAV_DMA_NUM_PS_WORDS 16
+#define KNAV_DMA_FDQ_PER_CHAN 4
+
+/* Tx channel scheduling priority */
+enum knav_dma_tx_priority {
+ DMA_PRIO_HIGH = 0,
+ DMA_PRIO_MED_H,
+ DMA_PRIO_MED_L,
+ DMA_PRIO_LOW
+};
+
+/* Rx channel error handling mode during buffer starvation */
+enum knav_dma_rx_err_mode {
+ DMA_DROP = 0,
+ DMA_RETRY
+};
+
+/* Rx flow size threshold configuration */
+enum knav_dma_rx_thresholds {
+ DMA_THRESH_NONE = 0,
+ DMA_THRESH_0 = 1,
+ DMA_THRESH_0_1 = 3,
+ DMA_THRESH_0_1_2 = 7
+};
+
+/* Descriptor type */
+enum knav_dma_desc_type {
+ DMA_DESC_HOST = 0,
+ DMA_DESC_MONOLITHIC = 2
+};
+
+/**
+ * struct knav_dma_tx_cfg: Tx channel configuration
+ * @filt_einfo: Filter extended packet info
+ * @filt_pswords: Filter PS words present
+ * @priority: Tx channel scheduling priority
+ */
+struct knav_dma_tx_cfg {
+ bool filt_einfo;
+ bool filt_pswords;
+ enum knav_dma_tx_priority priority;
+};
+
+/**
+ * struct knav_dma_rx_cfg: Rx flow configuration
+ * @einfo_present: Extended packet info present
+ * @psinfo_present: PS words present
+ * @err_mode: Error handling mode during buffer starvation
+ * @desc_type: Host or Monolithic descriptor type
+ * @psinfo_at_sop: PS word located at start of packet
+ * @sop_offset: Start of packet offset
+ * @dst_q: Destination queue for a given flow
+ * @thresh: Rx flow size threshold
+ * @fdq: Free descriptor queue array
+ * @sz_thresh0: RX packet size threshold 0
+ * @sz_thresh1: RX packet size threshold 1
+ * @sz_thresh2: RX packet size threshold 2
+ */
+struct knav_dma_rx_cfg {
+ bool einfo_present;
+ bool psinfo_present;
+ enum knav_dma_rx_err_mode err_mode;
+ enum knav_dma_desc_type desc_type;
+ bool psinfo_at_sop;
+ unsigned int sop_offset;
+ unsigned int dst_q;
+ enum knav_dma_rx_thresholds thresh;
+ unsigned int fdq[KNAV_DMA_FDQ_PER_CHAN];
+ unsigned int sz_thresh0;
+ unsigned int sz_thresh1;
+ unsigned int sz_thresh2;
+};
+
+/**
+ * struct knav_dma_cfg: Pktdma channel configuration
+ * @direction: DMA transfer direction (Rx or Tx)
+ * @u.tx: Tx channel configuration
+ * @u.rx: Rx flow configuration
+ */
+struct knav_dma_cfg {
+ enum dma_transfer_direction direction;
+ union {
+ struct knav_dma_tx_cfg tx;
+ struct knav_dma_rx_cfg rx;
+ } u;
+};
+
+/**
+ * struct knav_dma_desc: Host packet descriptor layout
+ * @desc_info: Descriptor information like id, type, length
+ * @tag_info: Flow tag info written in during RX
+ * @packet_info: Queue Manager, policy, flags etc
+ * @buff_len: Buffer length in bytes
+ * @buff: Buffer pointer
+ * @next_desc: For chaining the descriptors
+ * @orig_len: length since 'buff_len' can be overwritten
+ * @orig_buff: buff pointer since 'buff' can be overwritten
+ * @epib: Extended packet info block
+ * @psdata: Protocol specific
+ */
+struct knav_dma_desc {
+ u32 desc_info;
+ u32 tag_info;
+ u32 packet_info;
+ u32 buff_len;
+ u32 buff;
+ u32 next_desc;
+ u32 orig_len;
+ u32 orig_buff;
+ u32 epib[KNAV_DMA_NUM_EPIB_WORDS];
+ u32 psdata[KNAV_DMA_NUM_PS_WORDS];
+ u32 pad[4];
+} ____cacheline_aligned;
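A hedged sketch of how a client might pack desc_info and packet_info with the shift/mask macros above; the helper name and field choices are illustrative, not part of this header:

	#include <linux/soc/ti/knav_dma.h>

	static void example_fill_desc(struct knav_dma_desc *desc, u32 pkt_len,
				      u32 ps_words, u32 return_queue)
	{
		/* packet length occupies the low 22 bits of desc_info */
		desc->desc_info = pkt_len & KNAV_DMA_DESC_PKT_LEN_MASK;

		/* EPIB present, PS word count, and completion (return) queue */
		desc->packet_info = KNAV_DMA_DESC_HAS_EPIB |
			((ps_words & KNAV_DMA_DESC_PSLEN_MASK) <<
				KNAV_DMA_DESC_PSLEN_SHIFT) |
			((return_queue & KNAV_DMA_DESC_RETQ_MASK) <<
				KNAV_DMA_DESC_RETQ_SHIFT);
	}
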
+
+#if IS_ENABLED(CONFIG_KEYSTONE_NAVIGATOR_DMA)
+void *knav_dma_open_channel(struct device *dev, const char *name,
+ struct knav_dma_cfg *config);
+void knav_dma_close_channel(void *channel);
+#else
+static inline void *knav_dma_open_channel(struct device *dev, const char *name,
+ struct knav_dma_cfg *config)
+{
+ return (void *) NULL;
+}
+static inline void knav_dma_close_channel(void *channel)
+{}
+
+#endif
+
+#endif /* __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__ */
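A hedged sketch of opening a transmit channel through the API declared in this header; the channel name is hypothetical and the configuration values are illustrative only:

	#include <linux/device.h>
	#include <linux/dmaengine.h>
	#include <linux/err.h>
	#include <linux/string.h>
	#include <linux/soc/ti/knav_dma.h>

	static void *example_open_tx_chan(struct device *dev)
	{
		struct knav_dma_cfg cfg;
		void *chan;

		memset(&cfg, 0, sizeof(cfg));
		cfg.direction = DMA_MEM_TO_DEV;      /* transmit direction          */
		cfg.u.tx.filt_einfo = false;         /* do not filter extended info */
		cfg.u.tx.filt_pswords = false;       /* do not filter PS words      */
		cfg.u.tx.priority = DMA_PRIO_MED_L;

		/* "example-tx" is a made-up channel name for illustration */
		chan = knav_dma_open_channel(dev, "example-tx", &cfg);
		if (IS_ERR_OR_NULL(chan))
			return NULL;
		return chan;
	}

When the channel is no longer needed, the client hands the same handle back to knav_dma_close_channel().
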
diff --git a/include/linux/soc/ti/knav_qmss.h b/include/linux/soc/ti/knav_qmss.h
new file mode 100644
index 0000000..9f0ebb3b
--- /dev/null
+++ b/include/linux/soc/ti/knav_qmss.h
@@ -0,0 +1,90 @@
+/*
+ * Keystone Navigator Queue Management Sub-System header
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Sandeep Nair <sandeep_n@ti.com>
+ * Cyril Chemparathy <cyril@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SOC_TI_KNAV_QMSS_H__
+#define __SOC_TI_KNAV_QMSS_H__
+
+#include <linux/err.h>
+#include <linux/time.h>
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/fcntl.h>
+#include <linux/dma-mapping.h>
+
+/* queue types */
+#define KNAV_QUEUE_QPEND ((unsigned)-2) /* interruptible qpend queue */
+#define KNAV_QUEUE_ACC ((unsigned)-3) /* Accumulated queue */
+#define KNAV_QUEUE_GP ((unsigned)-4) /* General purpose queue */
+
+/* queue flags */
+#define KNAV_QUEUE_SHARED 0x0001 /* Queue can be shared */
+
+/**
+ * enum knav_queue_ctrl_cmd - queue operations.
+ * @KNAV_QUEUE_GET_ID: Get the ID number for an open queue
+ * @KNAV_QUEUE_FLUSH: forcibly empty a queue if possible
+ * @KNAV_QUEUE_SET_NOTIFIER: Set a notifier callback to a queue handle.
+ * @KNAV_QUEUE_ENABLE_NOTIFY: Enable notifier callback for a queue handle.
+ * @KNAV_QUEUE_DISABLE_NOTIFY: Disable notifier callback for a queue handle.
+ * @KNAV_QUEUE_GET_COUNT: Get number of queues.
+ */
+enum knav_queue_ctrl_cmd {
+ KNAV_QUEUE_GET_ID,
+ KNAV_QUEUE_FLUSH,
+ KNAV_QUEUE_SET_NOTIFIER,
+ KNAV_QUEUE_ENABLE_NOTIFY,
+ KNAV_QUEUE_DISABLE_NOTIFY,
+ KNAV_QUEUE_GET_COUNT
+};
+
+/* Queue notifier callback prototype */
+typedef void (*knav_queue_notify_fn)(void *arg);
+
+/**
+ * struct knav_queue_notify_config: Notifier configuration
+ * @fn: Notifier function
+ * @fn_arg: Notifier function arguments
+ */
+struct knav_queue_notify_config {
+ knav_queue_notify_fn fn;
+ void *fn_arg;
+};
+
+void *knav_queue_open(const char *name, unsigned id,
+ unsigned flags);
+void knav_queue_close(void *qhandle);
+int knav_queue_device_control(void *qhandle,
+ enum knav_queue_ctrl_cmd cmd,
+ unsigned long arg);
+dma_addr_t knav_queue_pop(void *qhandle, unsigned *size);
+int knav_queue_push(void *qhandle, dma_addr_t dma,
+ unsigned size, unsigned flags);
+
+void *knav_pool_create(const char *name,
+ int num_desc, int region_id);
+void knav_pool_destroy(void *ph);
+int knav_pool_count(void *ph);
+void *knav_pool_desc_get(void *ph);
+void knav_pool_desc_put(void *ph, void *desc);
+int knav_pool_desc_map(void *ph, void *desc, unsigned size,
+ dma_addr_t *dma, unsigned *dma_sz);
+void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz);
+dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt);
+void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma);
+
+#endif /* __SOC_TI_KNAV_QMSS_H__ */
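A hedged end-to-end sketch of the queue API declared above: open a shared general-purpose queue, push a DMA-mapped descriptor, pop it back, and close the handle. The queue name, descriptor address, and size are placeholders:

	#include <linux/err.h>
	#include <linux/errno.h>
	#include <linux/soc/ti/knav_qmss.h>

	static int example_queue_roundtrip(dma_addr_t desc_dma, unsigned int desc_sz)
	{
		void *q;
		dma_addr_t dma;
		unsigned int size;
		int ret;

		/* any free general-purpose queue, shareable between openers */
		q = knav_queue_open("example-gp", KNAV_QUEUE_GP, KNAV_QUEUE_SHARED);
		if (IS_ERR(q))
			return PTR_ERR(q);

		ret = knav_queue_push(q, desc_dma, desc_sz, 0);
		if (ret)
			goto out;

		dma = knav_queue_pop(q, &size);
		if (!dma)
			ret = -ENOENT;
	out:
		knav_queue_close(q);
		return ret;
	}
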